Dataset schema (one row per repository):

| Column | Type | Lengths / values |
| --- | --- | --- |
| repo_name | string | length 5 to 114 |
| repo_url | string | length 24 to 133 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| directory_id | string | length 40 |
| branch_name | string (categorical) | 209 values |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 9.83k to 683M |
| star_events_count | int64 | 0 to 22.6k |
| fork_events_count | int64 | 0 to 4.15k |
| gha_license_id | string (categorical) | 17 values |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | string (categorical) | 115 values |
| files | list | length 1 to 13.2k |
| num_files | int64 | 1 to 13.2k |
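Each row pairs this scalar metadata with a `files` list whose elements carry per-blob statistics (`alpha_fraction`, `avg_line_length`, `length_bytes`, ...) plus the raw source under a `text` key, as the sample rows below show. As a minimal sketch of how such rows might be inspected — assuming the split is stored as a local Parquet shard (the file name `train-00000.parquet` is hypothetical, not from the source) and that pandas with a Parquet engine such as pyarrow is installed:

```python
# Minimal sketch: load one (hypothetical) Parquet shard of this dataset
# and print a summary of each repository row. Column and field names
# match the schema above; the shard file name is an assumption.
import pandas as pd

df = pd.read_parquet("train-00000.parquet")  # hypothetical shard name

for _, row in df.head(3).iterrows():
    print(row["repo_name"], "-", row["num_files"], "file(s),",
          row["star_events_count"], "star events")
    for blob in row["files"]:
        # Each `files` element is a dict of per-blob metadata plus the
        # full source text under the "text" key.
        print("   ", blob["path"], f'({blob["language"]}, {blob["length_bytes"]} bytes)')
```

The 40-character `snapshot_id`, `revision_id`, and `directory_id` columns are Git-style SHA-1 hex identifiers, which is why their minimum and maximum lengths coincide.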
borkode/python-games
https://github.com/borkode/python-games
61c65e80d53e986edd10fd51fa3e51d5aaf5b527
7f47b0c28d46142c33f2323709410f51dbbb4989
5cb3b5f0f4eea484bf8f5fcd293d9952522b68cd
refs/heads/master
2020-04-22T13:01:52.483268
2019-02-12T21:23:28
2019-02-12T21:23:28
170,394,417
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6706114411354065, "alphanum_fraction": 0.685404360294342, "avg_line_length": 31.74193572998047, "blob_id": "7f11ebc6a4ab0a615666a24c22a00a7d5da1477f", "content_id": "93a38284cc3e3b2645676b863cc1ff5d7c0e797a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1014, "license_type": "no_license", "max_line_length": 102, "num_lines": 31, "path": "/Snake/scripts/pg_functions.py", "repo_name": "borkode/python-games", "src_encoding": "UTF-8", "text": "import pygame,os\nglobal screen,screensize\ndirectory = str(os.path.dirname(os.path.abspath(__file__)))\nfdir = directory+\"\\\\Font.ttf\"\ndef init(scrn,size):\n global screen,screensize\n screen = scrn\n screensize = size\ndef rFont (size):\n return pygame.font.Font(fdir,size)\ndef writeText(text,x,y,size,color,bgcolor):\n label = rFont(size).render(text,1,color)\n tsize = rFont(size).size(text)\n whd = [tsize[0],tsize[1]]\n re = pygame.Rect(x,y,whd[0],whd[1])\n pygame.draw.rect(screen,bgcolor,re)\n screen.blit(label,(x,y))\n return re\ndef getClicked(rect):\n return pygame.mouse.get_pressed()[0] & pygame.Rect(rect).collidepoint(pygame.mouse.get_pos()) == 1\ndef getDir():\n return directory\ndef getHovering(rect):\n return pygame.Rect(rect).collidepoint(pygame.mouse.get_pos()) == 1\ndef centerPx (fs,txt):\n font = rFont(fs)\n return [screensize[0]/2-font.size(txt)[0]/2,screensize[1]/2-font.size(txt)[1]]\ndef getScreen():\n return screen\ndef getScreenSize():\n return screensize" }, { "alpha_fraction": 0.5370558500289917, "alphanum_fraction": 0.5837563276290894, "avg_line_length": 29.8125, "blob_id": "48945239aded81e98f9536c1033c7c2929db4d04", "content_id": "224f9572988eada39194e3a68836387d233745db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 985, "license_type": "no_license", "max_line_length": 96, "num_lines": 32, "path": "/Snake/scripts/Draw.py", "repo_name": "borkode/python-games", "src_encoding": "UTF-8", "text": "import pygame, math, time\nfrom scripts import pg_functions as func\ncolors = [(0,0,0),(255,0,0),(0,255,0)]\npygame.init()\nglobal Map,sz,screen,screensize\nscreen = func.getScreen()\nscreensize = func.getScreenSize()\ndef setMap(m,s):\n global Map,sz\n Map = m\n sz = s\n global ps\n ps = [math.floor(screensize[0]/sz[0]),math.floor(screensize[1]/sz[1])]\ndef retXY(x,y):\n return Map[y*sz[0]+x]\ndef retID(x,y):\n return y*sz[0]+x\ndef printMap():\n for i in range(sz[1]):\n st=\"\"\n for x in range(sz[0]):\n st += str(Map[x+i*sz[0]])\n print(st)\ndef drawMap():\n for y in range(sz[1]):\n for x in range(sz[0]):\n pygame.draw.rect(screen,colors[retXY(x,y)],pygame.Rect(x*ps[0],y*ps[1],ps[0],ps[1]))\n for x in range(sz[0]):\n pygame.draw.line(screen,(0,0,0),(x*ps[0],0),(x*ps[0],screensize[1]),2)\n for y in range(sz[1]):\n pygame.draw.line(screen,(0,0,0),(0,y*ps[1]),(screensize[0],y*ps[1]),2)\n pygame.display.flip()" }, { "alpha_fraction": 0.604092538356781, "alphanum_fraction": 0.6708185076713562, "avg_line_length": 29.405405044555664, "blob_id": "b7d43a573f008b39568bdd85b047c749c305ca33", "content_id": "bcb78b5b3e3a627fcc7d6507b17d205be808a403", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1124, "license_type": "no_license", "max_line_length": 126, "num_lines": 37, "path": "/Snake/start.py", "repo_name": "borkode/python-games", "src_encoding": "UTF-8", "text": "import pygame,ctypes,time\nfrom scripts import pg_functions as func\nfrom 
scripts import PiControl\nuser32 = ctypes.windll.user32\nonStartScreen=True\nsize = [800,480] # adjust for monitor\npygame.init()\npygame.mouse.set_visible(False)\ndirectory = func.getDir()\nscreen = pygame.display.set_mode(size,pygame.FULLSCREEN)\nfunc.init(screen,size)\npygame.display.set_caption(\"PI ZERO GAME\")\ndone = False\nclock = pygame.time.Clock()\nglobal ptext\nptext = \"PRESS A TO PLAY\"\ni = 0\nwhile not done:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done=True\n if i == 120:\n ptext = \"\"\n elif i >= 200:\n ptext = \"PRESS A TO PLAY\"\n i=0\n screen.fill((0,0,0))\n func.writeText(\"PySnake\",func.centerPx(35,\"PySnake\")[0],func.centerPx(35,\"PySnake\")[1]-35/2,35,(255,255,255),(0,0,0))\n play_button = func.writeText(ptext,func.centerPx(15,ptext)[0],func.centerPx(15,ptext)[1]+25+15/2,15,(255,255,255),(0,0,0))\n pygame.display.flip()\n i+=1\n if PiControl.buttonChecked(\"A\"):\n break\nscreen.fill((0,0,0))\npygame.display.flip()\nfrom scripts import Map" }, { "alpha_fraction": 0.5519287586212158, "alphanum_fraction": 0.6454005837440491, "avg_line_length": 36.5, "blob_id": "8fccba7d2b7052bedb86a1f7c187d69b12b08ceb", "content_id": "5e984ec253f2899226c898c2e1bb861cb8135666", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 674, "license_type": "no_license", "max_line_length": 105, "num_lines": 18, "path": "/Snake/scripts/lose.py", "repo_name": "borkode/python-games", "src_encoding": "UTF-8", "text": "import time,pygame,os\nfrom scripts import pg_functions as func\nfrom scripts import PiControl\npygame.init()\nscreen = func.getScreen()\nscreensize = func.getScreenSize\ndef Lose(score):\n sct = \"SCORE: \"+str(score)+\".\"\n screen.fill((0,0,0))\n func.writeText(\"GAME OVER\",func.centerPx(32,\"GAME OVER\")[0],50,32,(255,255,255),(0,0,0))\n func.writeText(sct,func.centerPx(16,sct)[0],90,16,(255,255,255),(0,0,0))\n func.writeText(\"PRESS B TO EXIT\",func.centerPx(16,\"PRESS B TO EXIT\")[0],110,16,(255,255,255),(0,0,0))\n pygame.display.flip()\n while True:\n time.sleep(0.1)\n if PiControl.buttonChecked(\"B\")==True:\n pygame.quit()\n break" }, { "alpha_fraction": 0.5823104977607727, "alphanum_fraction": 0.6086642742156982, "avg_line_length": 23.741071701049805, "blob_id": "51e421ccc5508357725d9bd56f44da9843cecb39", "content_id": "b80312d8b44ddf2e5887900e5e8e68be43018571", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2770, "license_type": "no_license", "max_line_length": 107, "num_lines": 112, "path": "/Snake/scripts/Map.py", "repo_name": "borkode/python-games", "src_encoding": "UTF-8", "text": "import time,random,os,math\nfrom scripts import Draw, PiControl\nglobal yp,xp,screen,foodBeingEaten\nyp=0\nxp=0\nfoodBeingEaten = True\nsz = [40,20]\nglobal snakex,snakey,sheadp,blockInFront,snakelength\nsnakex = []\nsnakey = []\nsheadp = [round(sz[0]/2),round(sz[1]/2)]\nblockInFront = 0\nsnakelength = 0\ndisplaylen = 0\ndef createBlankGraph(size,filler):\n width = size[0]\n height = size[1]\n blank = []\n for i in range(width*height):\n blank.append(filler)\n return blank\nMap = createBlankGraph(sz,0)\ndef placeFood(filler):\n for i in range(len(Map)):\n if Map[i]==filler:\n Map[i]=0\n break\n rndm = round(random.random()*len(Map))\n while True:\n rndm = round(random.random()*len(Map))\n try:\n if Map[rndm]!=2:\n Map[rndm]=filler\n break\n except:\n continue\n return rndm\ndef setXY(x,y,setTo):\n Map[y*sz[0]+x] = setTo\n return 
y*sz[0]+x\ndef retXY(x,y):\n return Map[y*sz[0]+x]\ndef retID(x,y):\n return y*sz[0]+x\ndef checkSnakeCollide(headp):\n headx = headp[0]\n heady = headp[1]\n Colliding = False\n for i in range(len(snakex)):\n if snakex[i] == headx and snakey[i] == heady and i!=snakelength-1:\n Colliding = True\n return Colliding\nz = 0\nMoveNotUsed = True\nwhile True:\n try:\n if PiControl.buttonChecked('w') and yp!=1 and MoveNotUsed:\n yp=-1\n xp=0\n MoveNotUsed = False\n elif PiControl.buttonChecked('a') and xp!=1 and MoveNotUsed:\n yp=0\n xp=-1\n MoveNotUsed = False\n elif PiControl.buttonChecked('s') and yp!=-1 and MoveNotUsed:\n yp=1\n xp=0\n MoveNotUsed = False\n elif PiControl.buttonChecked('d') and xp!=-1 and MoveNotUsed:\n yp=0\n xp=1\n MoveNotUsed = False\n if z == 30:\n os.system('cls' if os.name == 'nt' else 'clear')\n Map = createBlankGraph(sz,0)\n Draw.setMap(Map,sz)\n if foodBeingEaten:\n food = placeFood(1)\n foodBeingEaten = False\n try:\n setXY(sheadp[0],sheadp[1],2)\n except:\n break\n Map[food] = 1\n if displaylen > snakelength:\n snakelength+=1\n sheadp[0]=sheadp[0]+xp\n sheadp[1]=sheadp[1]+yp\n snakex.append(sheadp[0])\n snakey.append(sheadp[1])\n if len(snakex)>snakelength and len(snakey)>snakelength:\n del snakex[0]\n del snakey[0]\n for i in range(len(snakex)):\n try:\n setXY(snakex[i],snakey[i],2)\n except:\n break\n if sheadp[0]<=-1 or sheadp[0]>sz[0] or sheadp[1]<=-1 or sheadp[1]>sz[1] or checkSnakeCollide(sheadp):\n break\n if retID(sheadp[0],sheadp[1]) == food:\n foodBeingEaten = True\n displaylen+=4\n Draw.drawMap()\n z=0\n MoveNotUsed = True\n time.sleep(1/300)\n z+=1\n except:\n continue\nfrom scripts import lose\nlose.Lose(snakelength)" }, { "alpha_fraction": 0.7763158082962036, "alphanum_fraction": 0.7763158082962036, "avg_line_length": 18.25, "blob_id": "8d2d4637bb5e02de2a683bb23ee6bc53a8d6d823", "content_id": "a532cb5fcb9256c673bb9116f4bcb4f3e59dac4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 76, "license_type": "no_license", "max_line_length": 35, "num_lines": 4, "path": "/Snake/scripts/PiControl.py", "repo_name": "borkode/python-games", "src_encoding": "UTF-8", "text": "import keyboard\n\ndef buttonChecked(key):\n return keyboard.is_pressed(key)" } ]
6
bxs-machine-learning-club/November-2019-Boston-Housing
https://github.com/bxs-machine-learning-club/November-2019-Boston-Housing
fa28e2dbb953479894b0ae0897ce6026a7a2b049
6f1809a2cf472b3fc64f805b43072ebb1ff0d948
9472b6f3e705a4d07bb9a25c88bd3859ec765168
refs/heads/master
2022-04-14T05:18:35.425156
2020-02-25T21:59:31
2020-02-25T21:59:31
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7572446465492249, "alphanum_fraction": 0.7686460614204407, "avg_line_length": 36.58928680419922, "blob_id": "65bcb0a520c77becf34878a528896c52994f20bc", "content_id": "d03d03ae0e0c93ff61dfde4f1d55d7c99cde85d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2105, "license_type": "no_license", "max_line_length": 192, "num_lines": 56, "path": "/ANN-Solution.py", "repo_name": "bxs-machine-learning-club/November-2019-Boston-Housing", "src_encoding": "UTF-8", "text": "# The average root mean square error (RMSE) of the model is 0.39\n#Importing the important modules\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_boston\nfrom sklearn.preprocessing import scale\nimport tensorflow.keras as keras\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\n#Loading the data\ndataset = load_boston()\n\n#Scaling all the features so that they are inbetween 0 and 1\nx_scaled = scale(dataset.data)\ny_scaled = scale(dataset.target)\n\n#Dividing the data between training and testing\nx_train, x_test, y_train, y_test = train_test_split(x_scaled, y_scaled)\n\n#Neural Network:\n#Sequential: A linearly connected layers\n\n#Activation: Defines the output of that node given an input or set of inputs. ReLU is a popular one\n\n#Input layer: Takes in the 13 features of each point in the datset and passes it though an activation method\n\n#Hidden Layer: Outputs 13 points, after passing through an activation method\n\n#Output Layer: Outputs one point (the guess), after using the previous layer's points and passing it through an activation method. This time, it's linear because we are using linear regression\nmodel = Sequential([\n Dense(20, activation=\"relu\", input_dim=13),\n Dense(13, activation=\"relu\"),\n Dense(1, activation=\"linear\")\n])\n\n#Compiler:\n#Loss: sets our metric of error to MSE\n\n#Optimizer: Tries to decrease the metric of error over time. ADAM is a popular open\n\n#Metrics: The \"scores\" that will be given after the learning is finished\nmodel.compile(loss=\"mean_squared_error\", optimizer=\"adam\", metrics=[\"mean_squared_error\", \"mean_absolute_error\"])\n\n#Fitting:\n#Epochs: The amount of times the neural network trains.\n\n#Validation Split: An extra testing set based off the training set for the neural network to compare results with.\nmodel.fit(x_train, y_train, epochs=1000, validation_split=0.2)\n\n\n#Evaluating how good our model was\nloss, mse, mae = model.evaluate(x_test, y_test)\nprint('The average root mean square error (RMSE) of the model is {:5.2f}'.format(np.sqrt(mse)))\n" }, { "alpha_fraction": 0.782608687877655, "alphanum_fraction": 0.782608687877655, "avg_line_length": 33.5, "blob_id": "6251292201b4fdd926976621a57b682376213c70", "content_id": "d4e0ae9a0a1dfc1c6b046cc8a942a9baf9f19a50", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 69, "license_type": "no_license", "max_line_length": 51, "num_lines": 2, "path": "/README.md", "repo_name": "bxs-machine-learning-club/November-2019-Boston-Housing", "src_encoding": "UTF-8", "text": "# Boston-Housing\nTask: Develop an ANN on the Boston Housing dataset.\n" } ]
2
sayan1995/Competitive-Coding-6
https://github.com/sayan1995/Competitive-Coding-6
5d5b21e564d71c36ebc91e8bc884c986991195b3
597b0a49ce4e9aa048a94446e2ef5440a0cbf460
5909212fe3d011b5c3e1edf401c0586163d21a30
refs/heads/master
2021-05-17T11:18:42.186897
2020-03-30T05:45:05
2020-03-30T05:45:05
250,752,867
0
0
null
2020-03-28T09:05:54
2019-10-11T20:23:27
2020-03-21T01:15:07
null
[ { "alpha_fraction": 0.588388204574585, "alphanum_fraction": 0.597920298576355, "avg_line_length": 35.09375, "blob_id": "660352ac8a31b7b1fba6628adaf735c8891b3cf7", "content_id": "b1556b28ac36b07f1c3cb744cc72a33a0f84fcb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1154, "license_type": "no_license", "max_line_length": 117, "num_lines": 32, "path": "/beautifulArrangements.py", "repo_name": "sayan1995/Competitive-Coding-6", "src_encoding": "UTF-8", "text": "'''\nTime Complexity: O(n!)\nSpace Complexity: O(n)\nDid this code successfully run on Leetcode : Yes\nExplanation: Create a state array to keep track of state changes and a set to make sure we add the element only once,\nuse the index in the recursive call to keep track of the current index in the state array and check if\n(array[i] % (index) == 0 or index % array[i] == 0) if this is true only then we backtrack and the index is an\naccepted beautiful solution.\n'''\nclass Solution:\n def __init__(self):\n self.count = 0\n\n def backtrack(self, state: list, index, array, check):\n\n if len(state) == len(array):\n self.count += 1\n\n for i in range(0, len(array)):\n if array[i] not in check and (array[i] % (index) == 0 or index % array[i] == 0):\n state.append(array[i])\n check.add(array[i])\n self.backtrack(state, index + 1, array, check)\n check.remove(array[i])\n state.pop()\n\n def countArrangement(self, N: int) -> int:\n array = [x for x in range(1, N + 1)]\n\n self.backtrack([], 1, array, set())\n\n return self.count" }, { "alpha_fraction": 0.38072288036346436, "alphanum_fraction": 0.4030120372772217, "avg_line_length": 25.206348419189453, "blob_id": "e0ee2aab425377d5dd329e68af62a06501b53f1d", "content_id": "532d7498060e1dafbd6944774334467f60b48794", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1660, "license_type": "no_license", "max_line_length": 122, "num_lines": 63, "path": "/logger.py", "repo_name": "sayan1995/Competitive-Coding-6", "src_encoding": "UTF-8", "text": "class Solution:\n\n def minWindow(self, s: str, t: str) -> str:\n if s == None or t == None:\n return \"\"\n\n frequency = [0] * 26\n mark = [0] * 26\n\n if s[0].isupper():\n case = case\n else:\n case = case\n case = case\n for i in range(0, len(t)):\n ch = (t[i]).lower()\n frequency[ord(ch) - ord(case)] += 1\n mark[ord(ch) - ord(case)] = 1\n\n unique = 0\n for i in range(0, len(mark)):\n if mark[i] == 1:\n unique += 1\n\n start = 0\n end = 0\n temp = ''\n res = math.inf\n result = ''\n currentFrequency = [0] * 26\n\n current = 0\n\n while end < len(s):\n\n # print(end)\n ch = s[end]\n if mark[ord(ch) - ord(case)] == 1:\n currentFrequency[ord(ch) - ord(case)] += 1\n\n if mark[ord(ch) - ord(case)] == 1 and frequency[ord(ch) - ord(case)] == currentFrequency[ord(ch) - ord(case)]:\n current += 1\n\n while start <= end and current == unique:\n\n if res > end - start + 1:\n res = end - start + 1\n result = s[start:end + 1]\n\n ch1 = s[start]\n currentFrequency[ord(ch1) - ord(case)] -= 1\n\n if mark[ord(ch1) - ord(case)] == 1 and frequency[ord(ch1) - ord(case)] > currentFrequency[\n ord(ch1) - ord(case)]:\n current -= 1\n\n start += 1\n end += 1\n\n if res == math.inf:\n return ''\n else:\n return result\n\n\n\n\n\n\n\n\n\n" } ]
2
musicicon/MIT-6.0001-solutions-to-projects
https://github.com/musicicon/MIT-6.0001-solutions-to-projects
e13063e225f3b6119966eee420824e5601c3251f
f66cb45b4f9f982c3c14bb324c19e2e680c3c7fe
c81528c143eb9cb763a0bd6024b74fdf448c6e7a
refs/heads/master
2022-12-05T14:38:41.581859
2020-08-18T05:44:32
2020-08-18T05:44:32
288,364,806
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5863171815872192, "alphanum_fraction": 0.590295672416687, "avg_line_length": 31.469974517822266, "blob_id": "5a9dde2044547c63061f203526db846738fef62e", "content_id": "a53a447f11f966e5c137515dc9e84bfab2db585e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12819, "license_type": "no_license", "max_line_length": 113, "num_lines": 383, "path": "/Problem Set 2_Hangman Game.py", "repo_name": "musicicon/MIT-6.0001-solutions-to-projects", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Aug 16 12:11:21 2020\r\n\r\n@author: Varun\r\n\"\"\"\r\n\r\n# Problem Set 2, hangman.py\r\n# Name: \r\n# Collaborators:\r\n# Time spent:\r\n\r\n# Hangman Game\r\n# -----------------------------------\r\n# Helper code\r\n# You don't need to understand this helper code,\r\n# but you will have to know how to use the functions\r\n# (so be sure to read the docstrings!)\r\nimport random\r\nimport string\r\n\r\nWORDLIST_FILENAME = \"words.txt\"\r\n\r\n\r\ndef load_words():\r\n \"\"\"\r\n Returns a list of valid words. Words are strings of lowercase letters.\r\n \r\n Depending on the size of the word list, this function may\r\n take a while to finish.\r\n \"\"\"\r\n print(\"Loading word list from file...\")\r\n # inFile: file\r\n inFile = open(WORDLIST_FILENAME, 'r')\r\n # line: string\r\n line = inFile.readline()\r\n # wordlist: list of strings\r\n wordlist = line.split()\r\n print(\" \", len(wordlist), \"words loaded.\")\r\n return wordlist\r\n\r\n\r\n\r\ndef choose_word(wordlist):\r\n \"\"\"\r\n wordlist (list): list of words (strings)\r\n \r\n Returns a word from wordlist at random\r\n \"\"\"\r\n return random.choice(wordlist)\r\n\r\n# end of helper code\r\n\r\n# -----------------------------------\r\n\r\n# Load the list of words into the variable wordlist\r\n# so that it can be accessed from anywhere in the program\r\nwordlist = load_words()\r\n\r\n\r\ndef is_word_guessed(secret_word, letters_guessed):\r\n '''\r\n secret_word: string, the word the user is guessing; assumes all letters are\r\n lowercase\r\n letters_guessed: list (of letters), which letters have been guessed so far;\r\n assumes that all letters are lowercase\r\n returns: boolean, True if all the letters of secret_word are in letters_guessed;\r\n False otherwise\r\n '''\r\n iswordguessed = True\r\n for i in secret_word:\r\n if i not in letters_guessed:\r\n iswordguessed = False\r\n break\r\n return iswordguessed\r\n\r\n\r\ndef get_guessed_word(secret_word, letters_guessed):\r\n '''\r\n secret_word: string, the word the user is guessing\r\n letters_guessed: list (of letters), which letters have been guessed so far\r\n returns: string, comprised of letters, underscores (_), and spaces that represents\r\n which letters in secret_word have been guessed so far.\r\n '''\r\n # FILL IN YOUR CODE HERE AND DELETE \"pass\"\r\n blanks = []\r\n for i in range(len(secret_word)):\r\n blanks.append('_ ')\r\n j = 0\r\n while j < len(secret_word):\r\n if secret_word[j] in letters_guessed:\r\n blanks[j] = secret_word[j] \r\n j += 1\r\n \r\n return ''.join(blanks)\r\n\r\n\r\ndef get_available_letters(letters_guessed):\r\n '''\r\n letters_guessed: list (of letters), which letters have been guessed so far\r\n returns: string (of letters), comprised of letters that represents which letters have not\r\n yet been guessed.\r\n '''\r\n \r\n allletters = list(string.ascii_lowercase)\r\n for i in letters_guessed:\r\n if i in allletters:\r\n 
del(allletters[allletters.index(i)]) \r\n #we used this long format because python gets confused whether i belongs to \r\n #letters_guessed or i belongs to allletters\r\n \r\n return ''.join(allletters)\r\n\r\ndef islettercorrect(letter, numguess, strike, letters_guessed):\r\n \r\n while True:\r\n if letter.isalpha() and letter.lower() in get_available_letters(letters_guessed):\r\n break\r\n # elif letter == \"*\" :\r\n # break\r\n else:\r\n if strike != 0:\r\n print(\"Three Strikes and you loose a guess\")\r\n print(\"Strike \", strike)\r\n if strike == 3:\r\n numguess -= 1\r\n strike = 0\r\n print(\"Guesses Remaining: \", numguess)\r\n if numguess == 0:\r\n break\r\n if not letter.isalpha():\r\n \r\n letter = input(\"Please enter an aplhabet:\")\r\n if letter == \"*\":\r\n strike -= 1\r\n print(show_possible_matches(get_guessed_word(secret_word, letters_guessed), letters_guessed))\r\n print(\"letters guessed\", letters_guessed)\r\n strike +=1\r\n \r\n \r\n \r\n print(\"\\n--------------------------\")\r\n \r\n return (letter, numguess, strike)\r\n\r\n\r\ndef getuniquewords(secret_word):\r\n count = 0\r\n listofletters = string.ascii_lowercase\r\n for i in listofletters:\r\n if i in secret_word:\r\n count += 1\r\n \r\n return count\r\n \r\n \r\n\r\ndef hangman(secret_word):\r\n '''\r\n secret_word: string, the secret word to guess.\r\n \r\n Starts up an interactive game of Hangman.\r\n \r\n * At the start of the game, let the user know how many \r\n letters the secret_word contains and how many guesses s/he starts with.\r\n \r\n * The user should start with 6 guesses\r\n\r\n * Before each round, you should display to the user how many guesses\r\n s/he has left and the letters that the user has not yet guessed.\r\n \r\n * Ask the user to supply one guess per round. 
Remember to make\r\n sure that the user puts in a letter!\r\n \r\n * The user should receive feedback immediately after each guess \r\n about whether their guess appears in the computer's word.\r\n\r\n * After each guess, you should display to the user the \r\n partially guessed word so far.\r\n \r\n Follows the other limitations detailed in the problem write-up.\r\n '''\r\n # FILL IN YOUR CODE HERE AND DELETE \"pass\"\r\n uniquewords = getuniquewords(secret_word)\r\n numguess = 6\r\n letters_guessed = []\r\n strike = 1\r\n print(\"Welcome to the game Hangman\")\r\n print(\"I am thinking of a word that is\", len(secret_word), \"letters long\")\r\n print(\"----------------------------------\")\r\n \r\n print(\"Available guesses: \", numguess)\r\n print(\"Available letters: \", get_available_letters(letters_guessed))\r\n print(\"If you wrongly guess a consonant you get penalty of 1 guess\")\r\n print(\"If you wrongly guess a vowel you get a penalty of 2 guess\")\r\n \r\n \r\n while not(is_word_guessed(secret_word, letters_guessed)) and numguess > 0: \r\n \r\n letter = input(\"Please input your guessed letter: \")\r\n \r\n (letter, numguess, strike) = islettercorrect(letter, numguess, strike, letters_guessed)\r\n if letter.lower() in secret_word:\r\n numguess +=1\r\n elif letter.lower() in 'aeiou' :\r\n numguess -=1\r\n \r\n letters_guessed.append(letter.lower())\r\n print(get_guessed_word(secret_word, letters_guessed))\r\n \r\n \r\n numguess -=1\r\n if numguess == -1:\r\n numguess = 0\r\n print(\"\\nGuesses Remaining: \", numguess)\r\n print(\"Available Letters: \", get_available_letters(letters_guessed))\r\n print(\"----------------------------------\")\r\n if is_word_guessed(secret_word, letters_guessed):\r\n print(\"You won with a score of \",numguess*uniquewords)\r\n else:\r\n print(\"Game Over! 
you loose\")\r\n \r\n# When you've completed your hangman function, scroll down to the bottom\r\n# of the file and uncomment the first two lines to test\r\n#(hint: you might want to pick your own\r\n# secret_word while you're doing your own testing)\r\n\r\n\r\n# -----------------------------------\r\n\r\n\r\n\r\ndef match_with_gaps(my_word, other_word, letters_guessed):\r\n '''\r\n my_word: string with _ characters, current guess of secret word\r\n other_word: string, regular English word\r\n returns: boolean, True if all the actual letters of my_word match the \r\n corresponding letters of other_word, or the letter is the special symbol\r\n _ , and my_word and other_word are of the same length;\r\n False otherwise: \r\n '''\r\n # FILL IN YOUR CODE HERE AND DELETE \"pass\"\r\n \r\n iword = my_word.replace(\" \", \"\")\r\n what_to_return = True\r\n for n in range(len(other_word)):\r\n if iword[n] != \"_\" and iword[n] != other_word[n]:\r\n what_to_return = False\r\n break\r\n for a in range(len(other_word)):\r\n if other_word[a] in letters_guessed and other_word[a] != iword[a]:\r\n what_to_return = False\r\n \r\n \r\n return what_to_return\r\n\r\n\r\n\r\ndef show_possible_matches(my_word, letters_guessed):\r\n '''\r\n my_word: string with _ characters, current guess of secret word\r\n returns: nothing, but should print out every word in wordlist that matches my_word\r\n Keep in mind that in hangman when a letter is guessed, all the positions\r\n at which that letter occurs in the secret word are revealed.\r\n Therefore, the hidden letter(_ ) cannot be one of the letters in the word\r\n that has already been revealed.\r\n\r\n '''\r\n # FILL IN YOUR CODE HERE AND DELETE \"pass\"\r\n \r\n possible = []\r\n new_word = my_word.replace(\" \",\"\")\r\n \r\n newlist = []\r\n for i in wordlist:\r\n if len(i) == len(new_word):\r\n newlist.append(i)\r\n \r\n \r\n for o in newlist: \r\n if match_with_gaps(new_word, o, letters_guessed):\r\n possible.append(o)\r\n o = 0\r\n return possible\r\n \r\n\r\n\r\ndef hangman_with_hints(secret_word):\r\n '''\r\n secret_word: string, the secret word to guess.\r\n \r\n Starts up an interactive game of Hangman.\r\n \r\n * At the start of the game, let the user know how many \r\n letters the secret_word contains and how many guesses s/he starts with.\r\n \r\n * The user should start with 6 guesses\r\n \r\n * Before each round, you should display to the user how many guesses\r\n s/he has left and the letters that the user has not yet guessed.\r\n \r\n * Ask the user to supply one guess per round. Make sure to check that the user guesses a letter\r\n \r\n * The user should receive feedback immediately after each guess \r\n about whether their guess appears in the computer's word.\r\n\r\n * After each guess, you should display to the user the \r\n partially guessed word so far.\r\n \r\n * If the guess is the symbol *, print out all words in wordlist that\r\n matches the current guessed word. 
\r\n \r\n Follows the other limitations detailed in the problem write-up.\r\n '''\r\n # FILL IN YOUR CODE HERE AND DELETE \"pass\"\r\n \r\n uniquewords = getuniquewords(secret_word)\r\n numguess = 6\r\n letters_guessed = []\r\n strike = 1\r\n print(\"Welcome to the game Hangman\")\r\n print(\"I am thinking of a word that is\", len(secret_word), \"letters long\")\r\n print(\"----------------------------------\")\r\n \r\n print(\"Available guesses: \", numguess)\r\n print(\"Available letters: \", get_available_letters(letters_guessed))\r\n print(\"If you wrongly guess a consonant you get penalty of 1 guess\")\r\n print(\"If you wrongly guess a vowel you get a penalty of 2 guess\")\r\n print(\"To get a hint at anytime in game enter an asterik * \")\r\n \r\n \r\n while not(is_word_guessed(secret_word, letters_guessed)) and numguess > 0: \r\n \r\n letter = input(\"Please input your guessed letter: \")\r\n if letter == \"*\":\r\n numguess +=1\r\n print(show_possible_matches(get_guessed_word(secret_word, letters_guessed), letters_guessed))\r\n \r\n (letter, numguess, strike) = islettercorrect(letter, numguess, strike, letters_guessed)\r\n if letter.lower() in secret_word:\r\n numguess +=1\r\n elif letter.lower() in 'aeiou' :\r\n numguess -=1\r\n \r\n letters_guessed.append(letter.lower())\r\n print(get_guessed_word(secret_word, letters_guessed))\r\n \r\n \r\n numguess -=1\r\n if numguess == -1:\r\n numguess = 0\r\n print(\"\\nGuesses Remaining: \", numguess)\r\n print(\"Available Letters: \", get_available_letters(letters_guessed))\r\n print(\"----------------------------------\")\r\n if is_word_guessed(secret_word, letters_guessed):\r\n print(\"You won with a score of \",numguess*uniquewords)\r\n else:\r\n print(\"Game Over! you loose\")\r\n print(secret_word)\r\n\r\n\r\n# When you've completed your hangman_with_hint function, comment the two similar\r\n# lines above that were used to run the hangman function, and then uncomment\r\n# these two lines and run this file to test!\r\n# Hint: You might want to pick your own secret_word while you're testing.\r\n\r\n\r\nif __name__ == \"__main__\":\r\n# # pass\r\n\r\n# # To test part 2, comment out the pass line above and\r\n# # uncomment the following two lines.\r\n \r\n# secret_word = choose_word(wordlist)\r\n# hangman(secret_word)\r\n\r\n###############\r\n \r\n # To test part 3 re-comment out the above lines and \r\n # uncomment the following two lines. \r\n \r\n secret_word = choose_word(wordlist)\r\n hangman_with_hints(secret_word)\r\n" } ]
1
nishanth-vimalesh/3D-human-pose-estimation
https://github.com/nishanth-vimalesh/3D-human-pose-estimation
d60e30f6faf9a28c567843a55dca5b8815aa9026
e251dc086254e66ff7f4b9a2e6db6fa850d6f7f8
d22708464f55fbea3fb2f6d37836fe5ae334706d
refs/heads/master
2022-04-28T03:50:51.121253
2019-08-30T15:39:08
2019-08-30T15:39:08
205,407,879
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.6436046361923218, "alphanum_fraction": 0.6459302306175232, "avg_line_length": 32.096153259277344, "blob_id": "00ce4eec6e7ae639fab7e0b8e02e45ab7ce9dc32", "content_id": "8cb0b43660e779f9b0acdb93f5087be99c63f1fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1720, "license_type": "no_license", "max_line_length": 76, "num_lines": 52, "path": "/src/lib/model.py", "repo_name": "nishanth-vimalesh/3D-human-pose-estimation", "src_encoding": "UTF-8", "text": "import torchvision.models as models\nimport torch\nimport torch.nn as nn\nimport os\n\n# from models.msra_resnet import get_pose_net\nfrom models.hrnet import get_pose_net\n\ndef create_model(cfg, opt): \n if 'msra' in opt.arch:\n print(\"=> using msra resnet '{}'\".format(opt.arch))\n num_layers = int(opt.arch[opt.arch.find('_') + 1:])\n # model = get_pose_net(num_layers, opt.heads)\n model = get_pose_net(cfg, opt.heads, is_train = True)\n optimizer = torch.optim.Adam(model.parameters(), opt.lr)\n else:\n assert 0, \"Model not supported!\"\n \n start_epoch = 1\n if opt.load_model != '':\n checkpoint = torch.load(\n opt.load_model, map_location=lambda storage, loc: storage)\n print('loaded {}, epoch {}'.format(opt.load_model, checkpoint['epoch']))\n if type(checkpoint) == type({}):\n state_dict = checkpoint['state_dict']\n else:\n state_dict = checkpoint.state_dict()\n model.load_state_dict(state_dict, strict=False)\n if opt.resume:\n print('resuming optimizer')\n optimizer.load_state_dict(checkpoint['optimizer'])\n start_epoch = checkpoint['epoch'] + 1\n for state in optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.cuda(opt.device, non_blocking=True)\n\n return model, optimizer, start_epoch\n \ndef save_model(path, epoch, model, optimizer=None):\n if isinstance(model, torch.nn.DataParallel):\n state_dict = model.module.state_dict()\n else:\n state_dict = model.state_dict()\n data = {'epoch': epoch,\n 'state_dict': state_dict}\n if not (optimizer is None):\n data['optimizer'] = optimizer.state_dict()\n torch.save(data, path)\n\nif __name__ == \"__main__\":\n pass" }, { "alpha_fraction": 0.5246516466140747, "alphanum_fraction": 0.5310825109481812, "avg_line_length": 27.287878036499023, "blob_id": "29925c99d31f61fca31e2957da7adb0735b12cf5", "content_id": "0298a243c42e2b55403bbe0f39f50b4ae843f1cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1866, "license_type": "no_license", "max_line_length": 120, "num_lines": 66, "path": "/src/exp.py", "repo_name": "nishanth-vimalesh/3D-human-pose-estimation", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# import os\n# import logging\n\n# from config import cfg\n# from config import update_config\nfrom config.default import _C as cfg\nfrom config.default import update_config\nimport argparse\n\n# import torch\n# import torch.nn as nn\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train keypoints network')\n # general\n parser.add_argument('--cfg',\n help='experiment configure file name',\n required=False,\n default= 'D:\\CV-Project\\pytorch-pose-hg-3d\\experiments\\mpii\\hrnet\\w32_256x256_adam_lr1e-3.yaml',\n type=str)\n\n parser.add_argument('opts',\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER)\n\n # philly\n 
parser.add_argument('--modelDir',\n help='model directory',\n type=str,\n default='')\n parser.add_argument('--logDir',\n help='log directory',\n type=str,\n default='')\n parser.add_argument('--dataDir',\n help='data directory',\n type=str,\n default='')\n parser.add_argument('--prevModelDir',\n help='prev Model directory',\n type=str,\n default='')\n\n args = parser.parse_args()\n\n return args\n\n\ndef main():\n args = parse_args()\n update_config(cfg, args)\n extra = cfg.MODEL.EXTRA\n print(cfg.MODEL.NAME)\n print(extra.FINAL_CONV_KERNEL)\n print(cfg.MODEL.EXTRA.STAGE2.NUM_CHANNELS)\n\n\n\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.4693877696990967, "alphanum_fraction": 0.509671688079834, "avg_line_length": 35.128204345703125, "blob_id": "98911e1641ae779fc5efa48e8e5af0be457d1dfd", "content_id": "bc2788dff6ab080f74377eb891817b217237e1cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5635, "license_type": "no_license", "max_line_length": 91, "num_lines": 156, "path": "/src/lib/models/losses.py", "repo_name": "nishanth-vimalesh/3D-human-pose-estimation", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nfrom torch.autograd import Function\nimport numpy as np\n\ndef _gather_feat(feat, ind, mask=None):\n dim = feat.size(2)\n ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)\n feat = feat.gather(1, ind)\n if mask is not None:\n mask = mask.unsqueeze(2).expand_as(feat)\n feat = feat[mask]\n feat = feat.view(-1, dim)\n return feat\n\n'''\ndef _tranpose_and_gather_feat(feat, ind):\n feat = feat.permute(0, 2, 3, 1).contiguous()\n feat = feat.view(feat.size(0), -1, feat.size(3))\n feat = _gather_feat(feat, ind)\n return feat\n'''\n\ndef _tranpose_and_gather_scalar(feat, ind):\n feat = feat.permute(0, 2, 3, 1).contiguous()\n # feat = feat.view(feat.size(0), -1, feat.size(3))\n feat = feat.view(feat.size(0), -1, 1)\n feat = _gather_feat(feat, ind)\n return feat\n\ndef reg_loss(regr, gt_regr, mask):\n num = mask.float().sum()\n mask = mask.unsqueeze(2).expand_as(gt_regr)\n\n regr = regr * mask.float()\n gt_regr = gt_regr * mask.float()\n \n regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, size_average=False)\n # regr_loss = nn.functional.mse_loss(regr, gt_regr, size_average=False)\n regr_loss = regr_loss / (num + 1e-4)\n return regr_loss\n\n\nclass RegLoss(nn.Module):\n def __init__(self):\n super(RegLoss, self).__init__()\n \n def forward(self, output, mask, ind, target):\n pred = _tranpose_and_gather_scalar(output, ind)\n loss = reg_loss(pred, target, mask)\n return loss\n\nclass FusionLoss(nn.Module):\n def __init__(self, device, reg_weight, var_weight):\n super(FusionLoss, self).__init__()\n self.reg_weight = reg_weight\n self.var_weight = var_weight\n self.device = device\n \n def forward(self, output, mask, ind, target, gt_2d):\n pred = _tranpose_and_gather_scalar(output, ind)\n loss = torch.cuda.FloatTensor(1)[0] * 0\n if self.reg_weight > 0:\n loss += self.reg_weight * reg_loss(pred, target, mask)\n if self.var_weight > 0:\n loss += VarLoss(\n self.device, self.var_weight)(pred, target, mask, gt_2d)[0] # target for visibility\n return loss.to(self.device, non_blocking=True)\n\nclass VarLoss(Function):\n def __init__(self, device, var_weight):\n super(VarLoss, self).__init__()\n self.device = device\n self.var_weight = var_weight\n self.skeleton_idx = [[[0,1], [1,2],\n [3,4], [4,5]],\n [[10,11], [11,12],\n [13,14], [14,15]], \n [[2, 6], [3, 6]], \n [[12,8], [13,8]]]\n 
self.skeleton_weight = [[1.0085885098415446, 1, \n 1, 1.0085885098415446], \n [1.1375361376887123, 1, \n 1, 1.1375361376887123], \n [1, 1], \n [1, 1]]\n\n \n def forward(self, input, visible, mask, gt_2d):\n xy = gt_2d.view(gt_2d.size(0), -1, 2)\n batch_size = input.size(0)\n output = torch.FloatTensor(1) * 0\n for t in range(batch_size):\n if mask[t].sum() == 0: # mask is the mask for supervised depth\n # xy[t] = 2.0 * xy[t] / ref.outputRes - 1\n for g in range(len(self.skeleton_idx)):\n E, num = 0, 0\n N = len(self.skeleton_idx[g])\n l = np.zeros(N)\n for j in range(N):\n id1, id2 = self.skeleton_idx[g][j]\n if visible[t, id1] > 0.5 and visible[t, id2] > 0.5:\n l[j] = (((xy[t, id1] - xy[t, id2]) ** 2).sum() + \\\n (input[t, id1] - input[t, id2]) ** 2) ** 0.5\n l[j] = l[j] * self.skeleton_weight[g][j]\n num += 1\n E += l[j]\n if num < 0.5:\n E = 0\n else:\n E = E / num\n loss = 0\n for j in range(N):\n if l[j] > 0:\n loss += (l[j] - E) ** 2 / 2. / num\n output += loss \n output = self.var_weight * output / batch_size\n self.save_for_backward(input, visible, mask, gt_2d)\n output = output.cuda(self.device, non_blocking=True)\n return output\n \n def backward(self, grad_output):\n input, visible, mask, gt_2d = self.saved_tensors\n input = input.cpu()\n xy = gt_2d.view(gt_2d.size(0), -1, 2)\n grad_input = torch.zeros(input.size())\n batch_size = input.size(0)\n for t in range(batch_size):\n if mask[t].sum() == 0: # mask is the mask for supervised depth\n for g in range(len(self.skeleton_idx)):\n E, num = 0, 0\n N = len(self.skeleton_idx[g])\n l = np.zeros(N)\n for j in range(N):\n id1, id2 = self.skeleton_idx[g][j]\n if visible[t, id1] > 0.5 and visible[t, id2] > 0.5:\n l[j] = (((xy[t, id1] - xy[t, id2]) ** 2).sum() + \\\n (input[t, id1] - input[t, id2]) ** 2) ** 0.5\n l[j] = l[j] * self.skeleton_weight[g][j]\n num += 1\n E += l[j]\n if num < 0.5:\n E = 0\n else:\n E = E / num\n for j in range(N):\n if l[j] > 0:\n id1, id2 = self.skeleton_idx[g][j]\n grad_input[t][id1] += self.var_weight * \\\n self.skeleton_weight[g][j] ** 2 / num * (l[j] - E) \\\n / l[j] * (input[t, id1] - input[t, id2]) / batch_size\n grad_input[t][id2] += self.var_weight * \\\n self.skeleton_weight[g][j] ** 2 / num * (l[j] - E) \\\n / l[j] * (input[t, id2] - input[t, id1]) / batch_size\n grad_input = grad_input.cuda(self.device, non_blocking=True)\n return grad_input, None, None, None" }, { "alpha_fraction": 0.5105733871459961, "alphanum_fraction": 0.5552648901939392, "avg_line_length": 36.867767333984375, "blob_id": "3be51c4e4dceeb9880a27e92660ac3d222a6836f", "content_id": "5cafe410c207d385f73ede51cb10c62c06e4363d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4587, "license_type": "no_license", "max_line_length": 98, "num_lines": 121, "path": "/src/lib/utils/debugger.py", "repo_name": "nishanth-vimalesh/3D-human-pose-estimation", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.mplot3d\nfrom mpl_toolkits.mplot3d import Axes3D\n \ndef show_2d(img, points, c, edges):\n num_joints = points.shape[0]\n points = ((points.reshape(num_joints, -1))).astype(np.int32)\n for j in range(num_joints):\n cv2.circle(img, (points[j, 0], points[j, 1]), 3, c, -1)\n for e in edges:\n if points[e].min() > 0:\n cv2.line(img, (points[e[0], 0], points[e[0], 1]),\n (points[e[1], 0], points[e[1], 1]), c, 2)\n return img\n\nmpii_edges = [[0, 1], [1, 2], [2, 6], [6, 3], [3, 4], [4, 5], \n [10, 11], [11, 12], [12, 8], 
[8, 13], [13, 14], [14, 15], \n [6, 8], [8, 9]]\n\nclass Debugger(object):\n def __init__(self, ipynb=False, edges=mpii_edges):\n self.ipynb = ipynb\n if not self.ipynb:\n self.plt = plt\n self.fig = self.plt.figure()\n self.ax = self.fig.add_subplot((111),projection='3d')\n self.ax.grid(False)\n oo = 1e10\n self.xmax, self.ymax, self.zmax = -oo, -oo, -oo\n self.xmin, self.ymin, self.zmin = oo, oo, oo\n self.imgs = {}\n self.edges=edges\n \n\n \n def add_point_3d(self, points, c='b', marker='o', edges=None):\n if edges == None:\n edges = self.edges\n #show3D(self.ax, point, c, marker = marker, edges)\n points = points.reshape(-1, 3)\n x, y, z = np.zeros((3, points.shape[0]))\n for j in range(points.shape[0]):\n x[j] = points[j, 0].copy()\n y[j] = points[j, 2].copy()\n z[j] = - points[j, 1].copy()\n self.xmax = max(x[j], self.xmax)\n self.ymax = max(y[j], self.ymax)\n self.zmax = max(z[j], self.zmax)\n self.xmin = min(x[j], self.xmin)\n self.ymin = min(y[j], self.ymin)\n self.zmin = min(z[j], self.zmin)\n if c == 'auto':\n c = [(z[j] + 0.5, y[j] + 0.5, x[j] + 0.5) for j in range(points.shape[0])]\n self.ax.scatter(x, y, z, s = 200, c = c, marker = marker)\n for e in edges:\n self.ax.plot(x[e], y[e], z[e], c = c)\n \n def show_3d(self, imgId, path):\n max_range = np.array([self.xmax-self.xmin, self.ymax-self.ymin, self.zmax-self.zmin]).max()\n Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(self.xmax+self.xmin)\n Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(self.ymax+self.ymin)\n Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(self.zmax+self.zmin)\n for xb, yb, zb in zip(Xb, Yb, Zb):\n self.ax.plot([xb], [yb], [zb], 'w')\n self.plt.savefig(path + '{}_hr_last1.png'.format(imgId), bbox_inches='tight', frameon = False)\n # self.plt.show()\n\n \n def add_img(self, img, imgId = 'default'):\n self.imgs[imgId] = img.copy()\n \n def add_mask(self, mask, bg, imgId = 'default', trans = 0.8):\n self.imgs[imgId] = (mask.reshape(mask.shape[0], mask.shape[1], 1) * 255 * trans + \\\n bg * (1 - trans)).astype(np.uint8)\n\n def add_point_2d(self, point, c, imgId='default'):\n self.imgs[imgId] = show_2d(self.imgs[imgId], point, c, self.edges)\n \n def show_img(self, pause = False, imgId = 'default'):\n cv2.imshow('{}'.format(imgId), self.imgs[imgId])\n if pause:\n cv2.waitKey()\n \n def show_all_imgs(self, pause = False):\n if not self.ipynb:\n for i, v in self.imgs.items():\n pass\n # cv2.imshow('{}'.format(i), v)\n if pause:\n cv2.waitKey()\n else:\n self.ax = None\n nImgs = len(self.imgs)\n fig=plt.figure(figsize=(nImgs * 10,10))\n nCols = nImgs\n nRows = nImgs // nCols\n for i, (k, v) in enumerate(self.imgs.items()):\n fig.add_subplot(1, nImgs, i + 1)\n if len(v.shape) == 3:\n plt.imshow(cv2.cvtColor(v, cv2.COLOR_BGR2RGB))\n else:\n plt.imshow(v)\n plt.show()\n \n def save_3d(self, path):\n max_range = np.array([self.xmax-self.xmin, self.ymax-self.ymin, self.zmax-self.zmin]).max()\n Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(self.xmax+self.xmin)\n Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(self.ymax+self.ymin)\n Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(self.zmax+self.zmin)\n for xb, yb, zb in zip(Xb, Yb, Zb):\n self.ax.plot([xb], [yb], [zb], 'w')\n self.plt.savefig(path, bbox_inches='tight', frameon = False)\n \n def save_img(self, imgId = 'default', path = '../debug/'):\n cv2.imwrite(path + '{}_hr_last.png'.format(imgId), self.imgs[imgId])\n \n def 
save_all_imgs(self, path = '../debug/'):\n for i, v in self.imgs.items():\n cv2.imwrite(path + '/{}.png'.format(i), v)\n \n" }, { "alpha_fraction": 0.6115702390670776, "alphanum_fraction": 0.6394628286361694, "avg_line_length": 30.91208839416504, "blob_id": "447534728aceed84dee83e6cb00e113829bdc434", "content_id": "cf589fe8a776ecdd8b92d502f5a9211fc3a04d22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2904, "license_type": "no_license", "max_line_length": 76, "num_lines": 91, "path": "/src/demo.py", "repo_name": "nishanth-vimalesh/3D-human-pose-estimation", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\n\nimport os\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.utils.data\nfrom opts import opts\nfrom model import create_model\nfrom utils.debugger import Debugger\nfrom utils.image import get_affine_transform, transform_preds\nfrom utils.eval import get_preds, get_preds_3d\n\n\nfrom config.default import _C as cfg\nfrom config.default import update_config\n\nimage_ext = ['jpg', 'jpeg', 'png']\nmean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)\nstd = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)\n\ndef is_image(file_name):\n ext = file_name[file_name.rfind('.') + 1:].lower()\n return ext in image_ext\n\n\ndef demo_image(image, image_name, model, opt):\n s = max(image.shape[0], image.shape[1]) * 1.0\n c = np.array([image.shape[1] / 2., image.shape[0] / 2.], dtype=np.float32)\n trans_input = get_affine_transform(\n c, s, 0, [opt.input_w, opt.input_h])\n inp = cv2.warpAffine(image, trans_input, (opt.input_w, opt.input_h),\n flags=cv2.INTER_LINEAR)\n inp = (inp / 255. 
- mean) / std\n inp = inp.transpose(2, 0, 1)[np.newaxis, ...].astype(np.float32)\n inp = torch.from_numpy(inp).to(opt.device)\n out = model(inp)[-1]\n pred = get_preds(out['hm'].detach().cpu().numpy())[0]\n pred = transform_preds(pred, c, s, (opt.output_w, opt.output_h))\n pred_3d = get_preds_3d(out['hm'].detach().cpu().numpy(), \n out['depth'].detach().cpu().numpy())[0]\n\n path = \"D:\\\\CV-Project\\\\pytorch-pose-hg-3d\\\\images\\\\last_save\\\\\"\n _,image_name = os.path.split(image_name)\n image_name = image_name[:-4]\n \n debugger = Debugger()\n debugger.add_img(image, image_name)\n debugger.add_point_2d(pred, (255, 0, 0), image_name)\n debugger.add_point_3d(pred_3d, 'b')\n debugger.show_all_imgs(pause=False)\n debugger.show_3d(image_name,path)\n debugger.save_img(image_name,path)\n # debugger.save_3d(path)\n\ndef main(opt):\n opt.heads['depth'] = opt.num_output\n if opt.load_model == '':\n opt.load_model = '../models/fusion_3d_var.pth'\n if opt.gpus[0] >= 0:\n opt.device = torch.device('cuda:{}'.format(opt.gpus[0]))\n else:\n opt.device = torch.device('cpu')\n \n model, _, _ = create_model(cfg,opt)\n model = model.to(opt.device)\n model.eval()\n\n if os.path.isdir(opt.demo):\n ls = os.listdir(opt.demo)\n for file_name in sorted(ls):\n if is_image(file_name):\n image_name = os.path.join(opt.demo, file_name)\n print('Running {} ...'.format(image_name))\n image = cv2.imread(image_name)\n demo_image(image, image_name, model, opt)\n elif is_image(opt.demo):\n print('Running {} ...'.format(opt.demo))\n image = cv2.imread(opt.demo)\n demo_image(image, image_name, model, opt)\n \n\nif __name__ == '__main__':\n opt = opts().parse()\n update_config(cfg, opt)\n main(opt)\n" }, { "alpha_fraction": 0.5598236918449402, "alphanum_fraction": 0.5740973949432373, "avg_line_length": 35.36641311645508, "blob_id": "546c25e569d6fa753dff7ba9ff7725d7bb8a0471", "content_id": "bb4cdd1b28ef5de21f95fbd8b25428ca46d91d25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4764, "license_type": "no_license", "max_line_length": 77, "num_lines": 131, "path": "/src/lib/train.py", "repo_name": "nishanth-vimalesh/3D-human-pose-estimation", "src_encoding": "UTF-8", "text": "import torch\nimport numpy as np\nfrom utils.image import flip, shuffle_lr\nfrom utils.eval import accuracy, get_preds\nimport cv2\nfrom progress.bar import Bar\nfrom utils.debugger import Debugger\nimport time\n\ndef step(split, epoch, opt, data_loader, model, optimizer=None):\n if split == 'train':\n model.train()\n else:\n model.eval()\n \n crit = torch.nn.MSELoss()\n\n acc_idxs = data_loader.dataset.acc_idxs\n edges = data_loader.dataset.edges\n shuffle_ref = data_loader.dataset.shuffle_ref\n mean = data_loader.dataset.mean\n std = data_loader.dataset.std\n convert_eval_format = data_loader.dataset.convert_eval_format\n\n Loss, Acc = AverageMeter(), AverageMeter()\n data_time, batch_time = AverageMeter(), AverageMeter()\n preds = []\n \n nIters = len(data_loader)\n bar = Bar('{}'.format(opt.exp_id), max=nIters)\n \n end = time.time()\n for i, batch in enumerate(data_loader):\n data_time.update(time.time() - end)\n input, target, meta = batch['input'], batch['target'], batch['meta']\n input_var = input.cuda(device=opt.device, non_blocking=True)\n target_var = target.cuda(device=opt.device, non_blocking=True)\n\n output = model(input_var)\n\n loss = crit(output[-1]['hm'], target_var)\n for k in range(opt.num_stacks - 1):\n loss += crit(output[k], target_var)\n\n if split == 'train':\n 
optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n else:\n input_ = input.cpu().numpy().copy()\n input_[0] = flip(input_[0]).copy()[np.newaxis, ...]\n input_flip_var = torch.from_numpy(input_).cuda(\n device=opt.device, non_blocking=True)\n output_flip = model(input_flip_var)\n output_flip = shuffle_lr(\n flip(output_flip[-1]['hm'].detach().cpu().numpy()[0]), shuffle_ref)\n output_flip = output_flip.reshape(\n 1, opt.num_output, opt.output_h, opt.output_w)\n # output_ = (output[-1].detach().cpu().numpy() + output_flip) / 2\n output_flip = torch.from_numpy(output_flip).cuda(\n device=opt.device, non_blocking=True)\n output[-1]['hm'] = (output[-1]['hm'] + output_flip) / 2\n pred, conf = get_preds(output[-1]['hm'].detach().cpu().numpy(), True)\n preds.append(convert_eval_format(pred, conf, meta)[0])\n \n Loss.update(loss.detach().item(), input.size(0))\n Acc.update(accuracy(output[-1]['hm'].detach().cpu().numpy(), \n target_var.detach().cpu().numpy(), acc_idxs))\n \n batch_time.update(time.time() - end)\n end = time.time()\n if not opt.hide_data_time:\n time_str = ' |Data {dt.avg:.3f}s({dt.val:.3f}s)' \\\n ' |Net {bt.avg:.3f}s'.format(dt = data_time,\n bt = batch_time)\n else:\n time_str = ''\n Bar.suffix = '{split}: [{0}][{1}/{2}] |Total {total:} |ETA {eta:}' \\\n '|Loss {loss.avg:.5f} |Acc {Acc.avg:.4f}'\\\n '{time_str}'.format(epoch, i, nIters, total=bar.elapsed_td, \n eta=bar.eta_td, loss=Loss, Acc=Acc, \n split = split, time_str = time_str)\n if opt.print_iter > 0:\n if i % opt.print_iter == 0:\n print('{}| {}'.format(opt.exp_id, Bar.suffix))\n else:\n bar.next()\n if opt.debug >= 2:\n gt = get_preds(target.cpu().numpy()) * 4\n pred = get_preds(output[-1]['hm'].detach().cpu().numpy()) * 4\n debugger = Debugger(ipynb=opt.print_iter > 0, edges=edges)\n img = (input[0].numpy().transpose(1, 2, 0) * std + mean) * 256\n img = img.astype(np.uint8).copy()\n debugger.add_img(img)\n debugger.add_mask(\n cv2.resize(target[0].numpy().max(axis=0), \n (opt.input_w, opt.input_h)), img, 'target')\n debugger.add_mask(\n cv2.resize(output[-1]['hm'][0].detach().cpu().numpy().max(axis=0), \n (opt.input_w, opt.input_h)), img, 'pred')\n debugger.add_point_2d(pred[0], (255, 0, 0))\n debugger.add_point_2d(gt[0], (0, 0, 255))\n debugger.show_all_imgs(pause=True)\n\n bar.finish()\n return {'loss': Loss.avg, \n 'acc': Acc.avg, \n 'time': bar.elapsed_td.total_seconds() / 60.}, preds\n \ndef train(epoch, opt, train_loader, model, optimizer):\n return step('train', epoch, opt, train_loader, model, optimizer)\n \ndef val(epoch, opt, val_loader, model):\n return step('val', epoch, opt, val_loader, model)\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n" }, { "alpha_fraction": 0.6214689016342163, "alphanum_fraction": 0.6768796443939209, "avg_line_length": 42.82857131958008, "blob_id": "cd1977d2e10710957b7576152dc08d82afea1c7a", "content_id": "2d0e5121b6ef836869311cdff629326439debf5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4602, "license_type": "no_license", "max_line_length": 336, "num_lines": 105, "path": "/README.md", "repo_name": "nishanth-vimalesh/3D-human-pose-estimation", "src_encoding": "UTF-8", "text": "# Towards 3D Human Pose Estimation in the 
Wild: a Weakly-supervised Approach\n\nThis repository is the PyTorch implementation for the network presented in:\n\n> Xingyi Zhou, Qixing Huang, Xiao Sun, Xiangyang Xue, Yichen Wei, \n> **Towards 3D Human Pose Estimation in the Wild: a Weakly-supervised Approach**\n> ICCV 2017 ([arXiv:1704.02447](https://arxiv.org/abs/1704.02447))\n\n<p align=\"center\"> \n <img src=\"teaser.png\" width=\"350\"/>\n</p>\n\n## Installation\nThe code was tested with [Anaconda](https://www.anaconda.com/download) Python 3.6 and [PyTorch]((http://pytorch.org/)) v0.4.1. After install Anaconda and Pytorch:\n\n1. Clone the repo:\n\n ~~~\n POSE_ROOT=/path/to/clone/pytorch-pose-hg-3d\n git clone https://github.com/xingyizhou/pytorch-pose-hg-3d POSE_ROOT\n ~~~\n\n\n2. Install dependencies (opencv, and progressbar):\n\n ~~~\n conda install --channel https://conda.anaconda.org/menpo opencv\n conda install --channel https://conda.anaconda.org/auto progress\n ~~~\n3. Disable cudnn for batch_norm (see [issue](https://github.com/xingyizhou/pytorch-pose-hg-3d/issues/16)):\n \n ~~~\n # PYTORCH=/path/to/pytorch\n # for pytorch v0.4.0\n sed -i \"1194s/torch\\.backends\\.cudnn\\.enabled/False/g\" ${PYTORCH}/torch/nn/functional.py\n # for pytorch v0.4.1\n sed -i \"1254s/torch\\.backends\\.cudnn\\.enabled/False/g\" ${PYTORCH}/torch/nn/functional.py\n ~~~\n4. Optionally, install tensorboard for visializing training. \n\n ~~~\n pip install tensorflow\n ~~~\n\n## Demo\n- Download our pre-trained [model](https://drive.google.com/open?id=1_2CCb_qsA1egT5c2s0ABuW3rQCDOLvPq) and move it to `models`.\n- Run `python demo.py --demo /path/to/image/or/image/folder [--gpus -1] [--load_model /path/to/model]`. \n\n`--gpus -1` is for CPU mode. \nWe provide example images in `images/`. For testing your own image, it is important that the person should be at the center of the image and most of the body parts should be within the image. \n\n## Benchmark Testing\nTo test our model on Human3.6 dataset run \n\n~~~\npython main.py --exp_id test --task human3d --dataset fusion_3d --load_model ../models/fusion_3d_var.pth --test --full_test\n~~~\n\nThe expected results should be 64.55mm.\n\n## Training\n- Prepare the training data:\n - Download images from [MPII dataset](http://human-pose.mpi-inf.mpg.de/#download) and their [annotation](https://onedrive.live.com/?authkey=%21AKqtqKs162Z5W7g&id=56B9F9C97F261712%2110696&cid=56B9F9C97F261712) in json format (`train.json` and `val.json`) (from [Xiao et al. ECCV2018](https://github.com/Microsoft/human-pose-estimation.pytorch)).\n - Download [Human3.6M ECCV challenge dataset](http://vision.imar.ro/human3.6m/challenge_open.php).\n - Download [meta data](https://www.dropbox.com/sh/uouev0a1ao84ofd/AADzZChEX3BdM5INGlbe74Pma/hm36_eccv_challenge?dl=0&subfolder_nav_tracking=1) (2D bounding box) of the Human3.6 dataset (from [Sun et al. ECCV 2018](https://github.com/JimmySuen/integral-human-pose)). \n - Place the data (or create symlinks) to make the data folder like: \n \n ```\n ${POSE_ROOT}\n |-- data\n `-- |-- mpii\n `-- |-- annot\n | |-- train.json\n | |-- valid.json\n `-- images\n |-- 000001163.jpg\n |-- 000003072.jpg\n `-- |-- h36m\n `-- |-- ECCV18_Challenge\n | |-- Train\n | |-- Val\n `-- msra_cache\n `-- |-- HM36_eccv_challenge_Train_cache\n | |-- HM36_eccv_challenge_Train_w288xh384_keypoint_jnt_bbox_db.pkl\n `-- HM36_eccv_challenge_Val_cache\n |-- HM36_eccv_challenge_Val_w288xh384_keypoint_jnt_bbox_db.pkl\n ```\n\n- Stage1: Train 2D pose only. 
[model](https://drive.google.com/open?id=1WqW1-_gCyGTB80m9MK_KUoD0dtElEQzv), [log](https://drive.google.com/open?id=1yKwmGD4MURHnDD5536niPjxe-keY3HGs)\n\n```\npython main.py --exp_id mpii\n```\n\n- Stage2: Train on 2D and 3D data without geometry loss (drop LR at 45 epochs). [model](https://drive.google.com/open?id=13d3AqzA85TSO7o1F8aq_ptnAkJ7LSp9-), [log](https://drive.google.com/open?id=18B_aOM9djCHZFlB0Rcoa6zOK1eXvsmRl)\n\n```\npython main.py --exp_id fusion_3d --task human3d --dataset fusion_3d --ratio_3d 1 --weight_3d 0.1 --load_model ../exp/mpii/model_last.pth --num_epoch 60 --lr_step 45\n```\n\n- Stage3: Train with geometry loss. [model](https://drive.google.com/open?id=1_2CCb_qsA1egT5c2s0ABuW3rQCDOLvPq), [log](https://drive.google.com/open?id=1hV4V74lTUd3COnoe1XMiTb8EUcyI8obN)\n\n```\npython main.py --exp_id fusion_3d_var --task human3d --dataset fusion_3d --ratio_3d 1 --weight_3d 0.1 --weight_var 0.01 --load_model ../models/fusion_3d.pth --num_epoch 10 --lr 1e-4\n```\n" }, { "alpha_fraction": 0.6218889355659485, "alphanum_fraction": 0.630185067653656, "avg_line_length": 27.75229263305664, "blob_id": "899f0fbda48a45c4461fab361e66f29f1de198e0", "content_id": "9d185926cdbe8b73828e00095d365caeb8fa9372", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3134, "license_type": "no_license", "max_line_length": 78, "num_lines": 109, "path": "/src/main.py", "repo_name": "nishanth-vimalesh/3D-human-pose-estimation", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\n\nimport os\n\nimport torch\nimport torch.utils.data\nfrom opts import opts\nfrom model import create_model, save_model\nfrom datasets.mpii import MPII\nfrom datasets.coco import COCO\nfrom datasets.fusion_3d import Fusion3D\nfrom logger import Logger\nfrom train import train, val\nfrom train_3d import train_3d, val_3d\nimport scipy.io as sio\n\nfrom config.default import _C as cfg\nfrom config.default import update_config\n\ndataset_factory = {\n 'mpii': MPII,\n 'coco': COCO,\n 'fusion_3d': Fusion3D\n}\n\ntask_factory = {\n 'human2d': (train, val), \n 'human3d': (train_3d, val_3d)\n}\n\ndef main(opt):\n\n update_config(cfg, opt)\n\n if opt.disable_cudnn:\n torch.backends.cudnn.enabled = False\n print('Cudnn is disabled.')\n\n logger = Logger(opt)\n opt.device = torch.device('cuda:{}'.format(opt.gpus[0]))\n\n Dataset = dataset_factory[opt.dataset]\n train, val = task_factory[opt.task]\n\n model, optimizer, start_epoch = create_model(cfg, opt)\n \n if len(opt.gpus) > 1:\n model = torch.nn.DataParallel(model, device_ids=opt.gpus).cuda(opt.device)\n else:\n model = model.cuda(opt.device)\n\n val_loader = torch.utils.data.DataLoader(\n Dataset(opt, 'val'), \n batch_size=1, \n shuffle=False,\n num_workers=1,\n pin_memory=True\n )\n\n if opt.test:\n log_dict_train, preds = val(0, opt, val_loader, model)\n sio.savemat(os.path.join(opt.save_dir, 'preds.mat'),\n mdict = {'preds': preds})\n return\n\n train_loader = torch.utils.data.DataLoader(\n Dataset(opt, 'train'), \n batch_size=opt.batch_size * len(opt.gpus), \n shuffle=True, # if opt.debug == 0 else False,\n num_workers=opt.num_workers,\n pin_memory=True\n )\n \n best = -1\n for epoch in range(start_epoch, opt.num_epochs + 1):\n mark = epoch if opt.save_all_models else 'last'\n log_dict_train, _ = train(epoch, opt, train_loader, model, optimizer)\n for k, v in log_dict_train.items():\n 
logger.scalar_summary('train_{}'.format(k), v, epoch)\n logger.write('{} {:8f} | '.format(k, v))\n if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:\n save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)), \n epoch, model, optimizer)\n log_dict_val, preds = val(epoch, opt, val_loader, model)\n for k, v in log_dict_val.items():\n logger.scalar_summary('val_{}'.format(k), v, epoch)\n logger.write('{} {:8f} | '.format(k, v))\n if log_dict_val[opt.metric] > best:\n best = log_dict_val[opt.metric]\n save_model(os.path.join(opt.save_dir, 'model_best.pth'), \n epoch, model)\n else:\n save_model(os.path.join(opt.save_dir, 'model_last.pth'), \n epoch, model, optimizer)\n logger.write('\\n')\n if epoch in opt.lr_step:\n lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))\n print('Drop LR to', lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n logger.close()\n\nif __name__ == '__main__':\n opt = opts().parse()\n main(opt)\n" }, { "alpha_fraction": 0.5747692584991455, "alphanum_fraction": 0.5948718190193176, "avg_line_length": 41.76315689086914, "blob_id": "510718ae90ecef062442d50063b78dd9accbc911", "content_id": "67400ef76ef3fdbaacc58ba58c10ade9c333656a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4875, "license_type": "no_license", "max_line_length": 85, "num_lines": 114, "path": "/src/lib/opts1.py", "repo_name": "nishanth-vimalesh/3D-human-pose-estimation", "src_encoding": "UTF-8", "text": "import argparse\nimport os\nimport sys\n\nclass opts():\n def __init__(self):\n self.parser = argparse.ArgumentParser()\n\n self.parser.add_argument('--exp_id', default = 'test')\n self.parser.add_argument('--gpus', default='0', help='-1 for CPU')\n self.parser.add_argument('--num_workers', type=int, default=4)\n self.parser.add_argument('--test', action = 'store_true', help = 'test')\n self.parser.add_argument('--debug', type = int, default = 0 )\n self.parser.add_argument('--demo', default = '', help = 'path/to/image')\n\n self.parser.add_argument('--task', default='human3d')\n self.parser.add_argument('--ratio_3d', type=float, default=1)\n self.parser.add_argument('--weight_3d', type=float, default=0.1)\n self.parser.add_argument('--weight_var', type=float, default=0.01)\n self.parser.add_argument('--full_test', action='store_true')\n\n\n self.parser.add_argument('--hide_data_time', action = 'store_true')\n self.parser.add_argument('--metric', default = 'acc')\n self.parser.add_argument('--resume', action = 'store_true')\n self.parser.add_argument('--load_model', default = '../models/fusion_3d_var.pth')\n self.parser.add_argument('--weight_decay', type=float, default=0.0)\n self.parser.add_argument('--scale', type=float, default=-1)\n self.parser.add_argument('--rotate', type=float, default=-1)\n self.parser.add_argument('--flip', type = float, default=0.5)\n self.parser.add_argument('--dataset', default = 'fusion_3d', \n help = 'mpii | coco')\n self.parser.add_argument('--all_pts', action = 'store_true',\n help = 'heatmap for all persons in stack 1')\n self.parser.add_argument('--multi_person', action = 'store_true', \n help = 'heatmap for all persons in final stack')\n self.parser.add_argument('--fit_short_side', action = 'store_true', \n help = 'fit to long or short bbox side when'\n 'the input resolution is rectangle')\n self.parser.add_argument('--lr', type=float, default=0.0001)\n self.parser.add_argument('--lr_step', type=str, default='45')\n self.parser.add_argument('--num_epochs', 
type=int, default=10)\n self.parser.add_argument('--val_intervals', type=int, default=20)\n self.parser.add_argument('--batch_size', type=int, default=16)\n self.parser.add_argument('--arch', default = 'msra_50', \n help = 'hg | msra_xxx')\n self.parser.add_argument('--disable_cudnn', action = 'store_true')\n self.parser.add_argument('--save_all_models', action = 'store_true')\n self.parser.add_argument('--print_iter', type = int, default = -1, \n help = 'for run in cloud server')\n\n self.parser.add_argument('--input_h', type = int, default = -1)\n self.parser.add_argument('--input_w', type = int, default = -1)\n self.parser.add_argument('--output_h', type = int, default = -1)\n self.parser.add_argument('--output_w', type = int, default = -1)\n\n def parse(self, args = ''):\n if args == '':\n opt = self.parser.parse_args()\n else:\n opt = self.parser.parse_args(args)\n \n opt.eps = 1e-6\n opt.momentum = 0.0\n opt.alpha = 0.99\n opt.epsilon = 1e-8\n opt.hm_gauss = 2\n opt.root_dir = os.path.join(os.path.dirname(__file__), '..', '..')\n opt.data_dir = os.path.join(opt.root_dir, 'data')\n opt.exp_dir = os.path.join(opt.root_dir, 'exp')\n\n opt.save_dir = os.path.join(opt.exp_dir, opt.exp_id)\n if opt.debug > 0:\n opt.num_workers = 1\n\n opt.gpus = [int(gpu) for gpu in opt.gpus.split(',')]\n opt.lr_step = [int(i) for i in opt.lr_step.split(',')]\n if opt.test:\n opt.exp_id = opt.exp_id + 'TEST'\n opt.save_dir = os.path.join(opt.exp_dir, opt.exp_id)\n\n if 'hg' in opt.arch or 'posenet' in opt.arch:\n opt.num_stacks = 2\n else:\n opt.num_stacks = 1\n \n if opt.input_h == -1 and opt.input_w == -1 and \\\n opt.output_h == -1 and opt.output_w == -1:\n if opt.dataset == 'coco':\n opt.input_h, opt.input_w = 256, 192\n opt.output_h, opt.output_w = 64, 48\n else:\n opt.input_h, opt.input_w = 256, 256\n opt.output_h, opt.output_w = 64, 64\n else:\n assert opt.input_h // opt.output_h == opt.input_w // opt.output_w\n \n if opt.scale == -1:\n opt.scale = 0.3 if opt.dataset == 'coco' else 0.25\n if opt.rotate == -1:\n opt.rotate = 40 if opt.dataset == 'coco' else 30\n\n opt.num_output = 17 if opt.dataset == 'coco' else 16\n opt.num_output_depth = opt.num_output if opt.task == 'human3d' else 0\n opt.heads = {'hm': opt.num_output}\n if opt.num_output_depth > 0:\n opt.heads['depth'] = opt.num_output_depth\n print('heads', opt.heads)\n\n\n if opt.resume:\n opt.load_model = '{}/model_last.pth'.format(opt.save_dir)\n\n return opt\n" } ]
9
TrafeX/irssi-notifier-redis
https://github.com/TrafeX/irssi-notifier-redis
fc994f5c885474b32abc314bcfbb65594762c2b7
159663c70f85e64c52fb9663db937e11786511ac
d92ac1951e0bcf5dfd17227fa57e22a461453e08
refs/heads/master
2021-01-22T05:10:32.814410
2013-08-30T10:08:35
2013-08-30T10:08:35
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5923243761062622, "alphanum_fraction": 0.5984793901443481, "avg_line_length": 30.033708572387695, "blob_id": "6ebe6db72af96457edcb56b3cf61fc34b4372c0e", "content_id": "ef8d50059bd7304092e3ff358a7b406df793d4be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2762, "license_type": "no_license", "max_line_length": 72, "num_lines": 89, "path": "/listen_redis.py", "repo_name": "TrafeX/irssi-notifier-redis", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#\n# Irssi notifier using Redis\n#\n# Copyright (c) 2012, Tim de Pater <code AT trafex DOT nl>\n# <https://github.com/TrafeX/irssi-notifier-redis>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\nimport sys\nimport dbus\nimport redis\nimport threading\nimport config\n\nclass ListenThread(threading.Thread):\n def __init__(self):\n self.bus = False\n self.notifyservice = False\n self.notifyid = 0\n threading.Thread.__init__(self)\n self.setDaemon(False)\n\n def run(self):\n print threading.currentThread().getName(), 'Starting'\n r = redis.StrictRedis(\n host=config.redis['server'],\n port=config.redis['port'],\n password=config.redis['password'],\n db=0\n )\n\n ps = r.pubsub()\n ps.subscribe(['irssi'])\n\n for item in ps.listen():\n print item\n msg = str(item['data']).partition(' ')\n if item['type'] == 'message' and len(msg[2]) > 0:\n self.notify(msg[0], msg[2])\n print threading.currentThread().getName(), 'Exiting'\n\n def notify(self, channel, msg):\n self.bus = dbus.Bus(dbus.Bus.TYPE_SESSION)\n # Connect to notification interface on DBUS.\n self.notifyservice = self.bus.get_object(\n 'org.freedesktop.Notifications',\n '/org/freedesktop/Notifications'\n )\n self.notifyservice = dbus.Interface(\n self.notifyservice,\n \"org.freedesktop.Notifications\"\n )\n # The second param is the replace id, so get the notify id back,\n # store it, and send it as the replacement on the next call.\n self.notifyservice.Notify(\n \"Irssi-notify\",\n self.notifyid,\n sys.path[0] + \"/icon-irc.png\",\n channel,\n msg,\n [],\n {},\n 5000\n )\n\nif __name__ == '__main__':\n try:\n thread = ListenThread()\n thread.start();\n\n except ValueError as strerror:\n print strerror\n except KeyboardInterrupt:\n print \"\\nStopping monitor..\\n\"\n sys.exit(0)\n except:\n raise\n" }, { "alpha_fraction": 0.7258753180503845, "alphanum_fraction": 0.7335610389709473, "avg_line_length": 32.42856979370117, "blob_id": "16e88e48f64cabded324b05a98aa36b02ad72bba", "content_id": "89b38cb5016b4d1f861ff8fc37b54794b23f64df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1171, "license_type": "no_license", "max_line_length": 137, "num_lines": 35, "path": "/README.md", "repo_name": "TrafeX/irssi-notifier-redis", "src_encoding": "UTF-8", "text": "Irssi Notifier Using Redis\n==========================\n*Running irssi on a remote server and want to 
\nSecurity\n========\nDon't forget to secure your server and enable the requirepass option in your redis.conf.\n\n\n[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/TrafeX/irssi-notifier-redis/trend.png)](https://bitdeli.com/free \"Bitdeli Badge\")\n\n" } ]
2
ichwanudin28/penambangan_dan-pencarian_web_membuat_grap
https://github.com/ichwanudin28/penambangan_dan-pencarian_web_membuat_grap
0e2a5b5b67f1d8bf1d9ca01b70496b4a88b6a68e
ab7e5555e1566242b62a7a11482e03985f8b4063
ad41ce234debd6c4b91d84271fd5937e3e4c011e
refs/heads/master
2020-05-26T14:39:58.201350
2019-06-21T02:11:20
2019-06-21T02:11:20
188,268,745
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5013850331306458, "alphanum_fraction": 0.5060018301010132, "avg_line_length": 28.16216278076172, "blob_id": "d4e44aee50babd4ad4b2c2f4ec9314b81db04294", "content_id": "69de7b0182c2350345e1f95fd8d1cecacbc4d4bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2166, "license_type": "no_license", "max_line_length": 87, "num_lines": 74, "path": "/source/crawl.py", "repo_name": "ichwanudin28/penambangan_dan-pencarian_web_membuat_grap", "src_encoding": "UTF-8", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\ndef getLinks(url):\n req = requests.get(url)\n soup = BeautifulSoup(req.text, 'html.parser')\n news_links = soup.find_all('a',{'class':'mod-judul-post'}, href=True)\n for link in news_links:\n listUrl.append(link['href'])\n node.append(link['href'])\n \n n_depth = 3\n swap_depth = 0\n swap_list = 0\n while swap_depth < n_depth :\n listUrl.append(\"depth\")\n next_depth = False\n while next_depth == False:\n if listUrl[swap_list] == \"depth\" :\n swap_list+=1\n next_depth = True\n swap_depth+=1\n break\n try :\n urls = listUrl[swap_list]\n reqs = requests.get(urls)\n soups = BeautifulSoup(reqs.text, 'html.parser')\n news_linkss = soups.find_all('a',{'class':'vrp-thumb-link'}, href=True)\n for link in news_linkss:\n listUrl.append(link['href'])\n \n if link['href'] not in node :\n node.append(link['href'])\n value = (str(node.index(urls)),str(node.index(link['href'])))\n edge.append(value)\n \n except :\n pass\n swap_list+=1\n swap_depth+=1\n \nif __name__== \"__main__\":\n listUrl = []\n node = []\n edge = []\n \n \n links = getLinks(\"http://lintasperistiwa.com/\")\n #print(listUrl)\n G=nx.DiGraph()\n pages = []\n for i in range(0,len(node)):\n pages.append(str(i))\n \n G.add_nodes_from(pages)\n G.add_edges_from(edge)\n \n print(edge)\n print(type(edge))\n #print(type(edge[0]))\n #membuat graf\n image = nx.draw_circular(G,node_color='blue', with_labels = True)\n plt.savefig(\"grap.png\", format=\"PNG\")\n #mancari nilai pagerank\n PR = nx.pagerank(G)\n print(PR)\n value = max(PR, key=PR.get)\n print(\"node : \" + max(PR,key=PR.get))\n print(\"link : \" + node[int(value)])\n nx.draw(G)\n plt.show()\n print(node)\n \n\n \n" }, { "alpha_fraction": 0.683253288269043, "alphanum_fraction": 0.7101256847381592, "avg_line_length": 35.796993255615234, "blob_id": "6a42370bed8c48f2a8598ee65b80c26bfb91265a", "content_id": "a539e3d047181ca1d1fd9a958f4f56eb46afdfd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9789, "license_type": "no_license", "max_line_length": 386, "num_lines": 266, "path": "/docs/index.mdX", "repo_name": "ichwanudin28/penambangan_dan-pencarian_web_membuat_grap", "src_encoding": "UTF-8", "text": "# **Tutorial crawler link wikipedia menggunakan scrapy dan menghitung pagerank menggunakan networkx**\n\n## **1. 
## 1.3 setting the domain\n\nThe domain is used to limit the scope of the sites explored by the crawler.\n\nOpen the file **tugasAkhir.py** in the spiders folder and set `allowed_domains = ['id.wikipedia.org']` so that the links the crawler follows stay within the `id.wikipedia.org` domain.\n\n## 1.4 setting the startPage\n\nThe start page tells the crawler the initial link where it starts exploring, like this: `start_urls = ['https://id.wikipedia.org/wiki/Bank_Indonesia']`. In this scenario I will crawl Wikipedia's **internal links** and build a graph from them to compute the pagerank of each page.\n\n## 1.5 choosing the data to collect\n\nThe data to collect are the internal links of Indonesian Wikipedia, which will be assembled into a graph. Since I do not configure any rules in this project, we use scrapy's default `parse` method. Here I also use beautifulsoup to help with extracting the html tags. The Wikipedia links also need to be repaired first, because if they are stored as crawled they look like this:\n\n![1558928886876](C:\\Users\\zainal\\AppData\\Roaming\\Typora\\typora-user-images\\1558928886876.png)\n\nso the links have to be fixed before they are stored and explored.\n\n\n\n```python\n    def parse(self, response):\n        global situsKe\n        situsKe+=1\n        linkKe = 0\n        deep = 1\n        \n        soup = BeautifulSoup(response.body , features=\"lxml\")\n        # initialize beautifulsoup\n        \n        print(\"situsKe = \",situsKe ,\" url = \",response.url)\n        links = soup.find_all('a') # select every <a></a> tag in the html\n        \n        linkDiperbaiki=list() # list that will hold the repaired wikipedia links\n        \n        for x in links:\n            tmp = str(x.get('href'))\n            if((\"/wiki/\" in tmp[0:6]) and (\":\" not in tmp)):\n                # keep only links matching the criteria: they start with \"/wiki/\" (the internal\n                # links we are after) and contain no \":\" character, which indicates a special page\n                tmp1 = str(\"https://id.wikipedia.org\" + tmp) # repair the link by prepending the domain\n                linkDiperbaiki.append(tmp1)\n\n        print(\"panjang links = \",len(linkDiperbaiki))\n        for x in linkDiperbaiki: # store the links\n            linkKe+=1\n            print(\"linkKe = \",linkKe,\"url = \",x)\n            item = ItemTugasAkhir()\n            item['url'] = response.url\n            item['link_keluar'] = x\n            item['situsKe'] = situsKe\n            item['linkKe'] = linkKe\n            item['deep'] = deep\n            yield item\n        for x in linkDiperbaiki: # explore those links\n            next_page = response.urljoin(x)\n            yield scrapy.Request(next_page, callback=self.parse_deep2)\n        print(\"===============================end deep1===========================\")\n```\n\nCopy this up to three parse defs (parse_deep2, parse_deep3) so that our crawler explores 3 levels deep.\n\n## 1.6 crawling the data\n\nTo crawl the data you can use the command `scrapy crawl --nolog TugasAkhir -o data.csv -t csv`; the crawled data is stored in data.csv.\n\nThe crawling process looks as follows; wait until it finishes.\n\n**The command is the same as in the previous tutorial, because this really is the previous project, modified.**\n\n![1558929375374](C:\\Users\\zainal\\AppData\\Roaming\\Typora\\typora-user-images\\1558929375374.png)\n\nWhen it is done, the data will look like this:\n\n![1558929728350](C:\\Users\\zainal\\AppData\\Roaming\\Typora\\typora-user-images\\1558929728350.png)\n\n## 2. migrating the csv to sqlite\n\nI use the tool [db browser for sqlite](https://sqlitebrowser.org/); it is very easy here: click new database, and when the pop-up appears asking which fields to create in the db, just close it.\n\n![1555675137009](C:\\Users\\zainal\\AppData\\Roaming\\Typora\\typora-user-images\\1555675137009.png)\n\nAfter that, in the file menu choose import table from csv, select the csv file from before, and run the migration.\n\n![1558929838634](C:\\Users\\zainal\\AppData\\Roaming\\Typora\\typora-user-images\\1558929838634.png)\n\nWait until the process finishes. \n\n ![1555675509234](C:\\Users\\zainal\\AppData\\Roaming\\Typora\\typora-user-images\\1555675509234.png)\n
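\nThe GUI steps above can also be scripted. A minimal sketch, assuming pandas is available; the file names data.csv and data.sqlite and the table name data match the ones used elsewhere in this tutorial:\n\n```python\n# hypothetical scripted alternative to the DB Browser import above\nimport sqlite3\nimport pandas as pd\n\ndf = pd.read_csv(\"data.csv\")  # the file written by `scrapy crawl`\nwith sqlite3.connect(\"data.sqlite\") as conn:\n    # create (or replace) a table named `data` with one column per csv column\n    df.to_sql(\"data\", conn, if_exists=\"replace\", index=False)\n```\n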
\n## 3. building the graph with networkx\n\nI assume the data we are going to process is already in sqlite. I create a file named `networkx.py` that is used to build the graph and compute the pagerank.\n\n\n\n```python\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport sqlite3\n\ndef koneksi(db_file):\n\ttry:\n\t\tconn = sqlite3.connect(db_file)\n\t\treturn conn\n\texcept sqlite3.Error as e:  # a bare Error would be undefined here\n\t\tprint(e)\n\treturn None\n\ndef main(): \n    graph=nx.Graph()  # initialize networkx\n    node = set()  # initialize the nodes\n\n    database = \"data.sqlite\"  # initialize the database and open the connection\n    conn = koneksi(database)\n\n\n    \n```\n\n\n\n## 3.1 adding the nodes\n\nA node is every page that was visited, as well as every link found on those pages. The links found on a page must also be turned into nodes, so that adding the edges does not fail because an edge refers to a node that was never initialized.\n\n```python\ndef ambilNode(conn):\n    node = set()\n    \n    cur = conn.cursor()\n    cur.execute(\"select DISTINCT link_keluar from data\")  # fetch the links contained in all visited pages\n    rows = cur.fetchall()\n    for row in rows:\n        tmp = str(row[0])\n        node.add(tmp)\n    \n    cur = conn.cursor()\n    cur.execute(\"select DISTINCT url from data\")  # fetch the pages that were visited\n    rows = cur.fetchall()\n    for row in rows:\n        tmp = str(row[0])\n        node.add(tmp)\n    \n    return node\n```\n\nThe method above is called in main with the connection from before as its parameter\n\n```python\nwith conn: \n    node = ambilNode(conn)\ngraph.add_nodes_from(node)\n```\n\n\n\n## 3.2 adding the edges\n\nAccording to the [networkx documentation](https://networkx.github.io/documentation/stable/reference/classes/generated/networkx.Graph.add_edge.html), edges can be added with the `add_edges_from()` method, whose parameter is a list containing the pairs of connected nodes. For example, given\n\nnode a\n\nnode b\n\nnode c\n\nto connect all three nodes the list must have the form `[[node a , node b],[node b , node c],[node c , node a]]` 
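\n\nA minimal, self-contained sketch of that format (the node names \"a\", \"b\" and \"c\" are just placeholders):\n\n```python\nimport networkx as nx\n\ng = nx.Graph()\ng.add_nodes_from([\"a\", \"b\", \"c\"])\ng.add_edges_from([[\"a\", \"b\"], [\"b\", \"c\"], [\"c\", \"a\"]])\nprint(g.number_of_edges())  # -> 3\n```\n\nThe function used in this tutorial builds those pairs from the sqlite data:\n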
\n\n\n\n```python\ndef ambilEdge(conn):\n kumpulanEdge = list()\n primNode = list()\n \n cur = conn.cursor()\n cur.execute(\"select DISTINCT url from data\") #select seluruh halaman yang pernah dikunjungi\n rows = cur.fetchall()\n for row in rows:\n \n tmp = str(row[0])\n primNode.append(tmp)\n \n for x in primNode:\n edge = list()\n cur = conn.cursor()\n query = \"select link_keluar from data where url='\"+str(x)+\"'\" #select seluruh link yang terkandung di masing masing halaman yang pernah dikunjungi\n\n cur.execute(query)\n rows = cur.fetchall()\n for row in rows:\n tmp =[str(x),str(row[0])] \n edge.append(tmp) #menambahkan pasangan - pasangan node\n kumpulanEdge.append(edge)#menambahkan seluruh pasangan suatu node ke Kumpulan edge , nantinya KumpulanEdge akan berisi seluruh node yang terhubung ke seluruh masing masing node\n return kumpulanEdge\n```\n\nfungsi tadi dipanggil di main dengan parameter koneksi , sehingga keseluruhan fungsi main seperti berikut , karena Kumpulanedge tadi berisi kumpulan maka harus dilakukan perulangan agar bisa menambahkan edge masing-masing node\n\n```python\ndef main(): \n graph=nx.Graph()\n node = set()\n database = \"data.sqlite\"\n conn = koneksi(database)\n with conn: \n node = ambilNode(conn)\n kumpulanEdge = ambilEdge(conn)\n graph.add_nodes_from(node)\n for x in kumpulanEdge :\n # print(\"===== edge =====\" , kumpulanEdge[x])\n graph.add_edges_from(x)\n```\n\n## 3.3 menampilkan graph\n\nuntuk menampilkan graph dibutuhkan matplotlib library bantuan untuk menampilkan graph cukup dengan fungsi berikut\n\n```python\nnx.draw(graph)\nplt.show()\n```\n\nmaka graph akan tampil\n\n![1559223397569](C:\\Users\\zainal\\AppData\\Roaming\\Typora\\typora-user-images\\1559223397569.png)\n\n## 4. menghitung pagerank dengan networkx\n\ndari graph yang telah disusun sebelumnya kita bisa menghitung pagerank menggunkan code berikut\n\n```python\npr = nx.pagerank(graph) #menghitung pagerank \nsorted_pr = sorted(pr.items() , reverse = True, key=lambda kv: kv[1]) # mengurutkan pagerank dari nilai yang terbesar\n\nprint(\"========== Top 3 pagerank ==========\") #mencetak 3 pagerank tertinggi\nprint(\"1. \", sorted_pr[0])\nprint(\"2. \", sorted_pr[1])\nprint(\"3. \", sorted_pr[2])\n\n```\n\n![1559226072683](C:\\Users\\zainal\\AppData\\Roaming\\Typora\\typora-user-images\\1559226072683.png)\n\nfull code ada di repo berikut :" } ]
2
lsz1995/tencent
https://github.com/lsz1995/tencent
db28cd0c69d3bc29c3011b5583bc5f1a02926fa1
82c29efa2f1877d46f11e406339d22bfe8e71d21
d0fb717b7899205e9fdd3884fa1ec702ecfccef2
refs/heads/master
2020-03-16T00:23:51.574545
2018-05-07T06:16:10
2018-05-07T06:16:10
132,415,538
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6371463537216187, "alphanum_fraction": 0.6383763551712036, "avg_line_length": 31.520000457763672, "blob_id": "dbb2d6305e0d7ca6689d41fbcdd07930e2fec15a", "content_id": "9cf76c2296d1df0f08c31e428fb23655456502a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 861, "license_type": "no_license", "max_line_length": 154, "num_lines": 25, "path": "/tencent/pipelines.py", "repo_name": "lsz1995/tencent", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport json\nimport csv\nclass TencentPipeline(object):\n\n def __init__(self):\n self.f = open(\"tencent.csv\", \"w\")\n self.writer = csv.writer(self.f)\n self.writer.writerow(['职位名称', '职位链接', '职位类别', '人数', '工作地点', '发布时间'])\n\n\n def process_item(self, item, spider):\n tencent_list = [item['positionName'], item['positionLink'], item['positionType'], item['peopleNumber'],item['workLocation'], item['publishTime']]\n print(tencent_list)\n self.writer.writerow(tencent_list)\n return item\n def close_spider(self, spider):#关闭\n self.writer.close()\n self.f.close()\n" }, { "alpha_fraction": 0.5415019989013672, "alphanum_fraction": 0.5517786741256714, "avg_line_length": 26.9777774810791, "blob_id": "450bc27dedaea1282f981dd29f71912781d2e4c1", "content_id": "6a034b7590e416beb274a4f66e2e1b9a9529317d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1301, "license_type": "no_license", "max_line_length": 80, "num_lines": 45, "path": "/tencent/spiders/tencentspider.py", "repo_name": "lsz1995/tencent", "src_encoding": "UTF-8", "text": "from scrapy import Spider,Request\nfrom tencent.items import TencentItem\nfrom lxml import etree\n\n\n\n\n\nclass TencentspiderSpider(Spider):\n name = 'tencentspider' #爬虫名\n allowed_domains = ['tencent.com'] #指定爬取的域名\n\n\n\n\n\n def start_requests(self):\n for i in range(0,500,10):\n url = 'https://hr.tencent.com/position.php?&start={}'.format(str(i))\n yield Request(url = url,callback=self.parse)\n\n\n def parse(self, response):\n #获取所有tr列表标签\n selector = etree.HTML(response.text)\n node_list = selector.xpath('//tr[@class=\"even\"]|//tr[@class=\"odd\"]')\n #node_list = response.xpath('//tr[@class=\"even\"]|//tr[@class=\"odd\"]')\n\n for node in node_list:\n item = TencentItem()\n item['positionName'] = node.xpath('./td[1]/a/text()')\n\n\n item['positionLink']=node.xpath('./td[1]/a/@href')\n\n if len(node.xpath('./td[2]/text()')):\n item['positionType'] = node.xpath('./td[2]/text()')\n else:\n item['positionType'] = \"\"\n\n\n item['peopleNumber']=node.xpath('./td[3]/text()')\n item['workLocation']=node.xpath('./td[4]/text()')\n item['publishTime']=node.xpath('./td[5]/text()')\n yield item\n\n\n\n\n\n\n" } ]
2
YasLbk/Objectdetection_SmartCity_Camera.RaspberryPI3
https://github.com/YasLbk/Objectdetection_SmartCity_Camera.RaspberryPI3
2567724ffe007ab9876b630a0a57405d437a69de
7d469ef9a77361b2b48937487e7069984e1f5714
88cad4a787968315d599267e55800fb09ec4d370
refs/heads/main
2023-02-17T23:25:41.631611
2021-01-22T03:08:19
2021-01-22T03:08:19
331,810,388
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7697160840034485, "alphanum_fraction": 0.7760252356529236, "avg_line_length": 34.22222137451172, "blob_id": "8799ab9583f3ac1d747ae516184ba79ec5e075b6", "content_id": "09340407dc554f4bf9464519b42731ca9bb0e8aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 317, "license_type": "no_license", "max_line_length": 81, "num_lines": 9, "path": "/README.md", "repo_name": "YasLbk/Objectdetection_SmartCity_Camera.RaspberryPI3", "src_encoding": "UTF-8", "text": "# Objectdetection_SmartCity_Camera.RaspberryPI3\n\nFor further information about the project, visit this page :\n\thttps://dl.orangedox.com/hllh7c and please contact us if you have any questions.\n \n \n![Atable](atable_output.jpg)\n![Bus.Tram.Velo](bustramvelo_output.png)\n![Place.HommeDeFer](place_hommedefer_output.jpg)\n" }, { "alpha_fraction": 0.6243709325790405, "alphanum_fraction": 0.6509705185890198, "avg_line_length": 31.325580596923828, "blob_id": "7628bffca472f87e892747194a9f28f4b393ad15", "content_id": "9659b6caf3b98dee44f7b266d73cc7d1378a794b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2792, "license_type": "no_license", "max_line_length": 96, "num_lines": 86, "path": "/yolo_objdet.py", "repo_name": "YasLbk/Objectdetection_SmartCity_Camera.RaspberryPI3", "src_encoding": "UTF-8", "text": "# import classes\nimport cv2\nimport numpy as np\nimport sys\n\n\ninputfile = str(sys.argv[1])\noutputfile = str(sys.argv[2])\n\n\nprint(\"Input file is \", inputfile)\nprint(\"Output file is \", outputfile)\n\n\n# Load Yolo\n# Weight file: it’s the trained model\n# Cfg file: it’s the configuration file,settings of the algory=ithm.\nnet = cv2.dnn.readNet(\"yolov3.weights\", \"yolov3.cfg\")\n# Extract name of all objects possible to be detected\nclasses = []\nwith open(\"object.fr\", \"r\") as f:\n classes = [line.strip() for line in f.readlines()]\nlayer_names = net.getLayerNames()\noutput_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\ncolors = np.random.uniform(0, 255, size=(len(classes), 3))\n\n\n# Loading image\nimg = cv2.imread(inputfile)\n# img = cv2.resize(img, None, fx=0.4, fy=0.4)\nheight, width, channels = img.shape\n\n# Blob it’s used to extract feature from the image and to resize them. YOLO accepts three sizes:\n# 320 - 609 - 416\n# The outs on line 21 it’s the result of the detection. 
Outs is an array that contains\n# all the information about the detected objects, their positions and the\n# confidence of each detection.\n# Detecting objects\nblob = cv2.dnn.blobFromImage(\n    img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\nnet.setInput(blob)\nouts = net.forward(output_layers)\n\n# Showing the information on the screen\nclass_ids = []\nconfidences = []\nboxes = []\nfor out in outs:\n    for detection in out:\n        scores = detection[5:]\n        class_id = np.argmax(scores)\n        confidence = scores[class_id]\n        # threshold score = 0.5\n        if confidence > 0.5:\n            # Object detected\n            center_x = int(detection[0] * width)\n            center_y = int(detection[1] * height)\n            w = int(detection[2] * width)\n            h = int(detection[3] * height)\n            # Rectangle coordinates\n            x = int(center_x - w / 2)\n            y = int(center_y - h / 2)\n            boxes.append([x, y, w, h])\n            confidences.append(float(confidence))\n            class_ids.append(class_id)\n\n# Non-maximum suppression to remove the noise in case of having several boxes for the\n# same object\nindexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\n\n\n# extract all the information and show it on the screen.\n\n# Box: contains the coordinates of the rectangle surrounding the detected object.\n# Label: the name of the detected object\n# Confidence: the confidence of the detection, from 0 to 1.\nfont = cv2.FONT_HERSHEY_PLAIN\nfor i in range(len(boxes)):\n    if i in indexes:\n        x, y, w, h = boxes[i]\n        label = str(classes[class_ids[i]])\n        color = colors[class_ids[i]]  # pick the class's color (indexing by i could run past the colors array)\n        cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)\n        cv2.putText(img, label, (x, y + 30), font, 1.3, color, 2)\n\ncv2.imwrite(outputfile, img)\n\n\n" } ]
2
Aagaarrd/02465-Pacman
https://github.com/Aagaarrd/02465-Pacman
f81685fb5124cea5303b1e6edb1a3222ba3c6b87
d17fd5f63d31d7139f300596fa73a8b5d70bd523
6379142585a650ae2d6c4133dc2604c2671ef128
refs/heads/master
2021-03-26T12:32:59.966906
2020-10-11T18:39:59
2020-10-11T18:39:59
247,704,651
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.617218554019928, "alphanum_fraction": 0.6317880749702454, "avg_line_length": 26.454545974731445, "blob_id": "799bc3d5346fae64e8ac211e699c8f5a2549568d", "content_id": "bf613f50c20db37cb1cadf2e17a2568a13ad301c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1510, "license_type": "no_license", "max_line_length": 95, "num_lines": 55, "path": "/q_agent.py", "repo_name": "Aagaarrd/02465-Pacman", "src_encoding": "UTF-8", "text": "\"\"\"\nThis file may not be shared/redistributed freely. Please read copyright notice in the git repo.\n\"\"\"\nimport numpy as np\nfrom collections import defaultdict\nfrom irlc.agent import Agent, train\nimport gym\nfrom irlc.irlc_plot import main_plot\nimport matplotlib.pyplot as plt\nimport gym_windy_gridworlds\n\n\n# from irlc import savepdf\n\n\nclass QAgent(Agent):\n \"\"\"\n Implement the Q-learning agent here. Note that the Q-datastructure already exist\n (see agent class for more information)\n \"\"\"\n\n def __init__(self, env, gamma=1.0, alpha=0.5, epsilon=0.1):\n self.alpha = alpha\n super().__init__(env, gamma, epsilon)\n\n def pi(self, s):\n \"\"\"\n Return current action using epsilon-greedy exploration. Look at the Agent class\n for ideas.\n \"\"\"\n return self.pi_eps(s)\n\n def train(self, s, a, r, sp, done=False):\n delta = r + self.gamma * np.max(self.Q[sp][a]) - self.Q[s][a]\n self.Q[s][a] += self.alpha * delta\n\n def __str__(self):\n return f\"QLearner_{self.gamma}_{self.epsilon}_{self.alpha}\"\n\n\ndef experiment():\n envn = 'StochWindyGridWorld-v0'\n env = gym.make(envn)\n agent = QAgent(env, epsilon=0.1, alpha=0.5)\n exp = f\"experiments/{str(agent)}\"\n train(env, agent, exp, num_episodes=200, max_runs=10)\n return env, exp\n\n\nif __name__ == \"__main__\":\n env, q_exp = experiment()\n main_plot(q_exp, smoothing_window=10)\n plt.ylim([-100, 0])\n plt.title(\"Q-learning on \" + env.spec._env_name)\n plt.show()\n" }, { "alpha_fraction": 0.5965932607650757, "alphanum_fraction": 0.6098878383636475, "avg_line_length": 36.609375, "blob_id": "24d5e4bb634d7daabd14da21a7c0fede0d3192c9", "content_id": "80b198a4f2f345bbc4976c0b83da1d3e45602a31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2407, "license_type": "no_license", "max_line_length": 98, "num_lines": 64, "path": "/exp_sarsa_agent.py", "repo_name": "Aagaarrd/02465-Pacman", "src_encoding": "UTF-8", "text": "\"\"\"\nThis file may not be shared/redistributed freely. Please read copyright notice in the git repo.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom q_agent import QAgent\nfrom irlc.irlc_plot import main_plot\nfrom irlc.agent import train\nfrom q_agent import experiment as q_agent_exp\nimport gym\nfrom irlc.common import defaultdict2\n\n\nclass ExpSarsaAgent(QAgent):\n def __init__(self, env, gamma=0.99, alpha=0.5, epsilon=0.1):\n self.t = 0 # indicate we are at the beginning of the episode\n super().__init__(env, gamma=gamma, alpha=alpha, epsilon=epsilon)\n\n def pi_probs(self, s):\n a = np.argmax(self.Q[s])\n pi_probs = np.ones(self.env.nA) * self.epsilon / self.env.nA\n pi_probs[a] += (1 - self.epsilon)\n return pi_probs\n\n def pi(self, s):\n if self.t == 0: # !f\n \"\"\" we are at the beginning of the episode. 
Generate a by being epsilon-greedy. \"\"\"\n            return self.pi_eps(s)\n        else:\n            \"\"\" Return the action self.a generated during train(), where s_{t+1} was already known. \"\"\"\n            return self.a\n\n    def train(self, s, a, r, sp, done=False):\n        \"\"\"\n        Generate A' as self.a by being epsilon-greedy; reuse code from the Agent class.\n        \"\"\"\n        self.a = self.pi_eps(sp) if not done else -1  # A' by epsilon-greedy; -1 is a dummy value at terminal states\n        pi_probs = self.pi_probs(sp)\n        exp_sarsa_target = np.dot(pi_probs, self.Q[sp])\n        # with A' = self.a known, update self.Q[s][a] towards the expected-Sarsa target\n        delta = r + (self.gamma * exp_sarsa_target if not done else 0) - self.Q[s][a]\n        self.Q[s][a] += self.alpha * delta\n        self.t = 0 if done else self.t + 1  # update current iteration number\n\n    def __str__(self):\n        return f\"ExpSarsa($\\\\gamma={self.gamma},\\\\epsilon={self.epsilon},\\\\alpha={self.alpha}$)\"\n\n\ndef experiment():\n    envn = 'StochWindyGridWorld-v0'\n    env = gym.make(envn)\n    agent = ExpSarsaAgent(env, epsilon=0.1, alpha=0.5)\n    exp = f\"experiments/{str(agent)}\"\n    train(env, agent, exp, num_episodes=200, max_runs=10)\n    return env, exp\n\n\nif __name__ == \"__main__\":\n    env, q_experiment = q_agent_exp()  # get results from Q-learning\n    env, sarsa_exp = experiment()\n    main_plot([q_experiment, sarsa_exp], smoothing_window=10)\n    plt.ylim([-100, 0])\n    plt.title(\"Q and Sarsa learning on \" + env.spec._env_name)\n    plt.show()\n" }, { "alpha_fraction": 0.45396825671195984, "alphanum_fraction": 0.4756132662296295, "avg_line_length": 38.375, "blob_id": "cf1b086dfcb49fe8f352d4a53586c299b557df95", "content_id": "32ac6daffe840bcee64da923b7a7b8ca9b170382", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3465, "license_type": "no_license", "max_line_length": 131, "num_lines": 88, "path": "/Qsigma.py", "repo_name": "Aagaarrd/02465-Pacman", "src_encoding": "UTF-8", "text": "# Based on Q(sigma) from S&B\nfrom irlc.agent import Agent, train\nimport numpy as np\nfrom irlc.irlc_plot import main_plot\nimport matplotlib.pyplot as plt\nimport gym\nnp.seterr('raise')\n\nclass Qsigma(Agent):\n    def __init__(self, env, gamma, alpha, epsilon, n):\n        self.alpha = alpha\n        self.gamma = gamma\n        self.n = n\n        self.R, self.S, self.A, self.rho, self.sigma = [None] * (self.n + 1), [None] * (self.n + 1), [None] * (\n            self.n + 1), [None] * (self.n + 1), [None] * (self.n + 1)\n        self.t = 0\n        super().__init__(env, gamma, epsilon)\n\n    def pi(self, s):\n        if self.t == 0:\n            self.A[self.t] = self.pi_eps(s)\n        return self.A[self.t % (self.n + 1)]\n\n    def train(self, s, a, r, sp, done=False):\n        t, n = self.t, self.n\n        if t == 0:  # We are in the initial state. 
Reset buffer.\n            self.S[0], self.A[0] = s, a\n        behavior_policy = self.get_policy(s, epsilon=0.3)\n        target_policy = self.get_policy(s, epsilon=self.epsilon)\n\n        # importance ratio for the action actually taken (the original indexed the\n        # probability arrays by the state modulo the buffer size, which is a bug)\n        self.rho[t % (n + 1)] = target_policy['probs'][a] / behavior_policy['probs'][a]\n        self.A[t % (n + 1)] = behavior_policy['action'] if not done else -1\n        self.R[(t + 1) % (n + 1)] = r\n        self.S[(t + 1) % (n + 1)] = sp\n        self.sigma[t % (n + 1)] = self.get_sigma(a)\n\n        if done:\n            T = t + 1\n            tau_steps_to_train = range(t - n + 1, T)\n        else:\n            T = 1e10\n            tau_steps_to_train = [t - n + 1]\n            behavior_policy_sp = self.get_policy(sp, epsilon=0.3)\n            target_policy_sp = self.get_policy(sp, epsilon=self.epsilon)\n            ap = behavior_policy_sp['action']\n            self.A[(t + 1) % (n + 1)] = ap\n            self.sigma[(t + 1) % (n + 1)] = self.get_sigma(ap)\n            # ratio for A' under the policies evaluated at sp (also fixed to index by the action)\n            self.rho[(t + 1) % (n + 1)] = target_policy_sp['probs'][ap] / behavior_policy_sp['probs'][ap]\n\n        for tau in tau_steps_to_train:\n            if tau >= 0:\n                if t + 1 < T:\n                    G = self.Q[self.S[(t + 1) % (n + 1)]][self.A[(t + 1) % (n + 1)]]\n                for k in range(min(t + 1, T), tau + 1, -1):\n                    k_idx = k % (n + 1)\n                    if k == T:\n                        G = self.R[T % (n + 1)]\n                    else:\n                        V = np.dot(target_policy['probs'], self.Q[sp])\n                        d = (self.sigma[k_idx] * self.rho[k_idx] + (1 - self.sigma[k_idx]) * target_policy['probs'][self.A[k_idx]])\n                        G = self.R[k_idx] + self.gamma * d * (G - self.Q[self.S[k_idx]][self.A[k_idx]]) + self.gamma * V\n\n                S_tau, A_tau = self.S[tau % (n + 1)], self.A[tau % (n + 1)]\n                delta = (G - self.Q[S_tau][A_tau])\n                self.Q[S_tau][A_tau] += self.alpha * delta\n\n        self.t += 1\n        if done:\n            self.t = 0\n\n    def get_policy(self, s, epsilon):\n        a = np.argmax(self.Q[s][:])\n        pi_probs = np.ones(self.env.nA) * epsilon / self.env.nA\n        pi_probs[a] += (1 - epsilon)\n        return {'action': np.random.choice(range(self.env.nA), p=pi_probs), 'probs': pi_probs}\n\n    def get_sigma(self, a):\n        return np.random.randint(2, size=self.env.nA)[a]\n\n\nif __name__ == \"__main__\":\n    envn = 'CliffWalking-v0'\n    env = gym.make(envn)\n    agent = Qsigma(env, n=3, gamma=0.9, epsilon=0.1, alpha=0.5)\n    agent_name = \"Qsigma\"\n    exp = f\"experiments/{envn}_{agent_name}\"\n    train(env, agent, exp, num_episodes=200, max_runs=5)\n    main_plot(exp, smoothing_window=10)\n    plt.ylim([-100, 0])\n    plt.show()\n" }, { "alpha_fraction": 0.5893738269805908, "alphanum_fraction": 0.5905123353004456, "avg_line_length": 29.63953399658203, "blob_id": "5fce5d6c489929edeb4349151cbbaae5bde3bce4", "content_id": "b03b39e59d4ac1ab79b728739792424cc636685c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2635, "license_type": "no_license", "max_line_length": 88, "num_lines": 86, "path": "/irlc/__init__.py", "repo_name": "Aagaarrd/02465-Pacman", "src_encoding": "UTF-8", "text": "import shutil\nimport inspect\nimport os\nimport compress_pickle\nimport numpy as np\n\n\ndef is_o_mode():\n    return False\n\n\ndef bmatrix(a):\n    if is_o_mode():\n        return a.__str__()\n    else:\n        \"\"\"Returns a LaTeX bmatrix\n        :a: numpy array\n        :returns: LaTeX bmatrix as a string\n        \"\"\"\n        if len(a.shape) > 2:\n            raise ValueError('bmatrix can at most display two dimensions')\n        lines = str(a).replace('[', '').replace(']', '').splitlines()\n        rv = [r'\\begin{bmatrix}']\n        rv += ['  ' + ' & '.join(l.split()) + r'\\\\' for l in lines]\n        rv += [r'\\end{bmatrix}']\n        return '\\n'.join(rv)\n\n\nfrom irlc.lazylog import LazyLog\n\ndef log_time_series(experiment, list_obs, max_xticks_to_log=None, run_name=None):\n    logdir = f\"{experiment}/\"\n\n    if max_xticks_to_log is not None and len(list_obs) > max_xticks_to_log:\n        I = np.round(np.linspace(0, len(list_obs) - 1, 
max_xticks_to_log))\n        list_obs = [o for i, o in enumerate(list_obs) if i in I.astype(int).tolist()]  # int, not np.int (removed in newer NumPy)\n\n    with LazyLog(logdir) as logz:\n        for n, l in enumerate(list_obs):\n            for k, v in l.items():\n                logz.log_tabular(k, v)\n            if \"Steps\" not in l:\n                logz.log_tabular(\"Steps\", n)\n            if \"Episode\" not in l:\n                logz.log_tabular(\"Episode\", n)\n            logz.dump_tabular(verbose=False)\n\n\n# from utils.irlc_plot import main_plot as main_plot\n\n\n# def cn_(file_name):\n#     return \"cache/\"+file_name\n\ndef is_this_my_computer():\n    CDIR = os.path.dirname(os.path.realpath(__file__)).replace('\\\\', '/')\n    return os.path.exists(CDIR + \"/../../Exercises\")\n\n\ndef cache_write(object, file_name, only_on_professors_computer=False):\n    if only_on_professors_computer and not is_this_my_computer():\n        \"\"\" Probably for your own good :-). \"\"\"\n        return\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    dn = os.path.dirname(file_name)\n    if not os.path.exists(dn):\n        os.mkdir(dn)\n    print(\"Writing cache...\", file_name)\n    with open(file_name, 'wb', ) as f:\n        compress_pickle.dump(object, f, compression=\"lzma\")\n    print(\"Done!\")\n\n\ndef cache_exists(file_name, cache_prefix=True):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    return os.path.exists(file_name)\n\n\ndef cache_read(file_name, cache_prefix=True):\n    # file_name = cn_(file_name) if cache_prefix else file_name\n    if os.path.exists(file_name):\n        with open(file_name, 'rb') as f:\n            return compress_pickle.load(f, compression=\"lzma\")\n            # return pickle.load(f)\n    else:\n        return None\n" }, { "alpha_fraction": 0.4675467610359192, "alphanum_fraction": 0.49229922890663147, "avg_line_length": 39.400001525878906, "blob_id": "08abfd71c05ca899f92284ea710b857853261f55", "content_id": "63aead579abd320b205359da5f6fb0242aa696ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3636, "license_type": "no_license", "max_line_length": 167, "num_lines": 90, "path": "/Qsigma_paper.py", "repo_name": "Aagaarrd/02465-Pacman", "src_encoding": "UTF-8", "text": "# based on Algorithm 1 https://arxiv.org/pdf/1703.01327.pdf\n\nfrom irlc.agent import Agent, train\nimport numpy as np\nfrom irlc.irlc_plot import main_plot\nimport matplotlib.pyplot as plt\nimport gym\nnp.seterr(all='raise')\n\n\nclass Qsigma(Agent):\n    def __init__(self, env, gamma, alpha, epsilon, n):\n        self.alpha = alpha\n        self.gamma = gamma\n        self.n = n\n        self.R, self.S, self.A, self.rho, self.sigma, self.delta = [None] * (self.n + 1), [None] * (self.n + 1), [None] * (\n            self.n + 1), [None] * (self.n + 1), [None] * (self.n + 1), [None] * (self.n + 1)\n        self.behavior_policy = lambda s: self.get_policy(s, epsilon=0.3)\n        self.target_policy = lambda s: self.get_policy(s, epsilon=self.epsilon)\n        self.t = 0\n\n        super().__init__(env, gamma, epsilon)\n\n    def pi(self, s):\n        if self.t == 0:\n            self.A[self.t] = self.pi_eps(s)\n        return self.A[self.t % (self.n + 1)]\n\n    def train(self, s, a, r, sp, done=False):\n        t, n = self.t, self.n\n        if t == 0:  # We are in the initial state. 
Reset buffer.\n            self.S[0], self.A[0] = s, a\n        self.rho[t % (n + 1)] = self.target_policy(s)['probs'][a] / self.behavior_policy(s)['probs'][a]\n        self.A[t % (n + 1)] = self.behavior_policy(s)['action']\n        self.R[t % (n + 1)] = r\n        self.S[(t + 1) % (n + 1)] = sp\n        self.sigma[t % (n + 1)] = self.get_sigma(a)\n\n        if done:\n            T = t + 1\n            self.delta[t % (n+1)] = r - self.Q[self.S[t % (n+1)]][self.A[t % (n+1)]]\n        else:\n            T = np.inf\n            ap = self.behavior_policy(sp)['action']\n            self.A[(t + 1) % (n + 1)] = ap\n            Qp = self.Q[sp][ap]  # fixed: read Q[sp][ap] directly (the old modulo indexing corrupted the Q-table)\n            sigmap = self.get_sigma(ap)\n            self.sigma[(t + 1) % (n + 1)] = sigmap\n            Vp = sum(self.target_policy(sp)['probs'][a]*self.Q[sp][a] for a in range(self.env.nA))\n            self.delta[t % (n + 1)] = r + self.gamma*(sigmap*Qp+(1-sigmap)*Vp) - self.Q[s][a]\n            # fixed: the importance ratio is for the action ap, not for a state index\n            self.rho[(t + 1) % (n + 1)] = self.target_policy(sp)['probs'][ap] / self.behavior_policy(sp)['probs'][ap]\n\n        tau = t - n + 1\n        if tau >= 0:\n            rho, E = 1, 1\n            S_tau, A_tau = self.S[tau % (n + 1)], self.A[tau % (n + 1)]\n            G = self.Q[S_tau][A_tau]\n            for k in range(tau, min(tau+n-1, T-1)):\n                k_id = k % (n + 1)\n                G += E*self.delta[k_id]\n                E = self.gamma*E*((1-self.sigma[k_id])*self.target_policy(self.S[(k + 1) % (n + 1)])['probs'][self.A[(k + 1) % (n + 1)]]+self.sigma[(k + 1) % (n + 1)])\n                rho *= (1 - self.sigma[k_id] + self.sigma[k_id]*self.rho[k_id])\n\n            delta = rho*(G-self.Q[S_tau][A_tau])\n            self.Q[S_tau][A_tau] += self.alpha * delta\n\n        self.t += 1\n        if done:\n            self.t = 0\n\n    def get_policy(self, s, epsilon):\n        a = np.argmax(self.Q[s])\n        pi_probs = np.ones(self.env.nA) * epsilon / self.env.nA\n        pi_probs[a] += (1 - epsilon)\n        return {'action': np.random.choice(range(self.env.nA), p=pi_probs), 'probs': pi_probs}\n\n    def get_sigma(self, a):\n        return np.random.randint(2, size=self.env.nA)[a]\n\n\nif __name__ == \"__main__\":\n    envn = 'CliffWalking-v0'\n    env = gym.make(envn)\n    agent = Qsigma(env, n=3, gamma=0.9, epsilon=0.1, alpha=0.5)\n    exp = f\"experiments/{envn}_{agent}\"\n    train(env, agent, exp, num_episodes=50, max_runs=5)\n    main_plot(exp, smoothing_window=10)\n    plt.ylim([-100, 0])\n    plt.show()\n" }, { "alpha_fraction": 0.6539682745933533, "alphanum_fraction": 0.6571428775787354, "avg_line_length": 32.157894134521484, "blob_id": "295797eca24abea4b4a25be318cb7397d22778fa", "content_id": "6a2a4f4908f3a90ee1a7edfb9e4e745bc26630fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 630, "license_type": "no_license", "max_line_length": 99, "num_lines": 19, "path": "/irlc/common.py", "repo_name": "Aagaarrd/02465-Pacman", "src_encoding": "UTF-8", "text": "\"\"\"\nThis file may not be shared/redistributed freely. 
Please read copyright notice in the git repo.\n\"\"\"\nimport collections\nimport inspect\nimport types\n\n\nclass defaultdict2(collections.defaultdict):\n def __missing__(self, key):\n if self.default_factory is None:\n raise KeyError((key,))\n\n if isinstance(self.default_factory, types.FunctionType):\n nargs = len(inspect.getfullargspec(self.default_factory).args)\n self[key] = value = self.default_factory(key) if nargs == 1 else self.default_factory()\n return value\n else:\n return super().__missing__(key)\n" }, { "alpha_fraction": 0.5629077553749084, "alphanum_fraction": 0.5837216377258301, "avg_line_length": 36, "blob_id": "4bb2cfc084ec81cb6dfea873a24b9e42b4da33a4", "content_id": "05880040f9b346044491a1099673b2b5bf8e5d39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3219, "license_type": "no_license", "max_line_length": 116, "num_lines": 87, "path": "/Qsigmalambda.py", "repo_name": "Aagaarrd/02465-Pacman", "src_encoding": "UTF-8", "text": "# based on Algorithm 1 from https://arxiv.org/pdf/1711.01569.pdf\nimport gym\nimport matplotlib.pyplot as plt\nfrom irlc.common import defaultdict2\nfrom irlc.irlc_plot import main_plot\nfrom irlc.agent import Agent, train\nfrom sarsa_agent import SarsaAgent\nfrom exp_sarsa_agent import ExpSarsaAgent\nimport gym_windy_gridworlds\nimport numpy as np\n\n\nclass QslAgent(SarsaAgent):\n def __init__(self, env, gamma=0.99, epsilon=0.1, sigma=1, sigma_strat='dynamic', alpha=0.5, lamb=0.9):\n super().__init__(env, gamma=gamma, alpha=alpha, epsilon=epsilon)\n self.lamb = lamb\n self.e = defaultdict2(self.Q.default_factory)\n self.sigma = sigma\n self.sigma_strat = sigma_strat\n\n\n def pi_probs(self, s):\n a = np.argmax(self.Q[s])\n pi_probs = np.ones(self.env.nA) * self.epsilon / self.env.nA\n pi_probs[a] += (1 - self.epsilon)\n return pi_probs\n\n def pi(self, s):\n if self.t == 0:\n return self.pi_eps(s)\n else:\n p = self.pi_probs(s)\n return np.random.choice(np.arange(0, self.env.nA), p=p)\n\n def train(self, s, a, r, sp, done=False):\n pi_probs = self.pi_probs(sp)\n ap = self.pi_eps(sp)\n sigma = self.sigma*0.9 if self.sigma_strat == 'dynamic' else self.sigma\n sarsa_target = self.Q[sp][ap]\n exp_sarsa_target = np.dot(pi_probs, self.Q[sp])\n td_target = r + self.gamma * (sigma * sarsa_target + (1 - sigma) * exp_sarsa_target if not done else 0)\n td_error = td_target - self.Q[s][a]\n self.e[s][a] += 1\n for s, es in self.e.items():\n for a, e_sa in enumerate(es):\n self.Q[s][a] += self.alpha * td_error * self.e[s][a]\n self.e[s][a] *= self.gamma * self.lamb * (sigma + (1 - sigma) * pi_probs[ap])\n\n if self.t > 1000:\n done = True\n\n if done:\n self.e.clear()\n self.sigma = 1 if self.sigma_strat == 'dynamic' else sigma\n else:\n self.a = ap\n self.sigma = sigma\n self.t += 1\n\n def __str__(self):\n agent = f\"Q($\\\\sigma={self.sigma_strat}-{self.sigma},\\\\lambda={self.lamb}$)\"\n return f\"{agent}($\\\\gamma={self.gamma},\\\\epsilon={self.epsilon},\\\\alpha={self.alpha}$)\"\n\n\ndef run_exp(env, num_episodes=50, epsilon=0.1, alpha=0.6, gamma=0.90):\n for _ in range(50):\n agents = [SarsaAgent(env, epsilon=epsilon, alpha=alpha, gamma=gamma),\n ExpSarsaAgent(env, epsilon=epsilon, alpha=alpha, gamma=gamma),\n QslAgent(env, epsilon=epsilon, alpha=alpha, gamma=gamma, sigma_strat='static', sigma=0.5, lamb=1),\n QslAgent(env, epsilon=epsilon, alpha=alpha, gamma=gamma, lamb=0.8)]\n\n experiments = []\n for agent in agents:\n expn = f\"experiments/{str(agent)}\"\n train(env, agent, expn, 
num_episodes=num_episodes, max_runs=100)\n            experiments.append(expn)\n    return experiments  # dedented: returning inside the loop made the 50 repetitions run only once\n\n\nif __name__ == \"__main__\":\n    envn = 'StochWindyGridWorld-v0'\n    env = gym.make(envn)\n    experiments = run_exp(env, num_episodes=200)\n    main_plot(experiments, smoothing_window=15)\n    plt.ylim([-100, -30])\n    plt.savefig('plot.png')\n    plt.show()\n" }, { "alpha_fraction": 0.6136776208877563, "alphanum_fraction": 0.6182833313941956, "avg_line_length": 41.64881134033203, "blob_id": "de139eec87f693382c02e666a220a95f0a0a8b21", "content_id": "b544db8f0dde31a49a31ef406f69b5fb094ce964", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7165, "license_type": "no_license", "max_line_length": 135, "num_lines": 168, "path": "/irlc/agent.py", "repo_name": "Aagaarrd/02465-Pacman", "src_encoding": "UTF-8", "text": "\"\"\"\nThis file may not be shared/redistributed freely. Please read copyright notice in the git repo.\n\"\"\"\nimport sys\nimport itertools\nimport numpy as np\nfrom irlc import log_time_series\nfrom tqdm import tqdm\nfrom irlc.common import defaultdict2\nfrom gym.envs.toy_text.discrete import DiscreteEnv\nfrom irlc.irlc_plot import existing_runs\nimport warnings\nfrom collections import OrderedDict\nimport os\nimport glob\nimport csv\n\n\nclass Agent():\n    \"\"\"\n    Main agent class. See description of Ex09 for further details on how to use it with the environment, train and main_plot functions.\n    \"\"\"\n\n    def __init__(self, env, gamma=0.99, epsilon=0):\n        self.env, self.gamma, self.epsilon = env, gamma, epsilon\n        \"\"\"\n        The self.Q variable is a custom datastructure to save the Q(s,a)-values during training. \n        There are multiple ways to implement the Q-values differently than here, most of which will land us in hot water\n        down the line. For instance, Q-values could be stored like a states x actions numpy table; this is simpler\n        than what we have below, but it has the disadvantage that it uses a lot of memory and that the states and actions\n        have to be integers indexed from zero (i.e. to index self.Q[s,a] ). Another idea is to use nested dictionaries, i.e. \n        env.p[s] is a dictionary with keys a; this uses less space, but it makes the max_a Q(s,a) operation difficult. \n        Finally we want the action-space to depend on s. \n        \n        We solve this using a custom datastructure: A dictionary such that if we index self.Q[s] for an (unknown) s, \n        it calls the function we provide to defaultdict2, i.e. defaultdict2(myfun) and inserts that value in the dictionary:\n        note this is an extension of the defaultdict-class (google to learn more). \n        \n        >>> self.Q = defaultdict2(myfun)\n        >>> self.Q[s] = myfun(s) # when we index self.Q[s] where s is not in Q[s]\n        \"\"\"\n        self.Q = defaultdict2(\n            lambda s: np.zeros(len(env.P[s]) if hasattr(env, 'P') and s in env.P else env.action_space.n))\n\n    def pi(self, s):\n        \"\"\" Should return the Agent's action in state s (i.e. an element contained in env.action_space)\"\"\"\n        raise NotImplementedError(\"return action\")\n\n    def train(self, s, a, r, sp, done=False):\n        \"\"\" Called at each step of the simulation.\n        The agent was in state s, took action a, ended up in state sp (with reward r).\n        'done' is a bool which indicates if the environment terminated when transitioning to sp. \"\"\"\n        raise NotImplementedError()\n\n    def __str__(self):\n        \"\"\" A unique name for this agent. Used for plotting. 
\"\"\"\n return super().__str__()\n\n def random_pi(self, s):\n \"\"\" Generates a random action given s.\n\n It might seem strange why this is useful, however many policies requires us to to random exploration, and it is\n possible to get the method wrong.\n We will implement the method depending on whether self.env defines an MDP or just contains an action space.\n \"\"\"\n if isinstance(self.env, DiscreteEnv):\n return np.random.choice(list(self.env.P[s].keys()))\n else:\n return self.env.action_space.sample()\n\n def pi_eps(self, s):\n \"\"\" Implement epsilon-greedy exploration. Return random action with probability self.epsilon,\n else be greedy wrt. the Q-values. \"\"\"\n return self.random_pi(s) if np.random.rand() < self.epsilon else np.argmax(\n self.Q[s] + np.random.rand(len(self.Q[s])) * 1e-8)\n\n def value(self, s):\n return np.max(self.Q[s])\n\n\n\"\"\"\nThis is a simple wrapper class around the Agent class above. It fixes the policy and is therefore useful for doing \nvalue estimation.\n\"\"\"\n\n\nclass ValueAgent(Agent):\n def __init__(self, env, gamma=0.95, policy=None, v_init_fun=None):\n self.env = env\n self.policy = policy # policy to evaluate\n \"\"\" Value estimates. \n Initially v[s] = 0 unless v_init_fun is given in which case v[s] = v_init_fun(s). \"\"\"\n self.v = defaultdict2(float if v_init_fun is None else v_init_fun)\n super().__init__(env, gamma)\n\n def pi(self, s):\n return self.random_pi(s) if self.policy is None else self.policy(s)\n\n def value(self, s):\n return self.v[s]\n\n\ndef load_time_series(experiment_name, exclude_empty=True):\n \"\"\"\n Load most recent non-empty time series (we load non-empty since lazylog creates a new dir immediately)\n \"\"\"\n files = list(filter(os.path.isdir, glob.glob(experiment_name + \"/*\")))\n if exclude_empty:\n files = [f for f in files if\n os.path.exists(os.path.join(f, \"log.txt\")) and os.stat(os.path.join(f, \"log.txt\")).st_size > 0]\n\n recent = sorted(files, key=lambda file: os.path.basename(file))[-1]\n stats = []\n with open(recent + '/log.txt', 'r') as f:\n csv_reader = csv.reader(f, delimiter='\\t')\n for i, row in enumerate(csv_reader):\n if i == 0:\n head = row\n else:\n stats.append({k: float(v) for k, v in zip(head, row)})\n return stats, recent\n\n\ndef train(env, agent, experiment_name=None, num_episodes=None, verbose=True, reset=True, max_steps=1e10,\n max_runs=None, saveload_model=False, save_stats=True):\n if max_runs is not None and existing_runs(experiment_name) >= max_runs:\n return experiment_name, None, True\n stats = []\n steps = 0\n ep_start = 0\n if saveload_model: # Code for loading/saving models\n did_load = agent.load(os.path.join(experiment_name))\n if did_load:\n stats, recent = load_time_series(experiment_name=experiment_name)\n ep_start, steps = stats[-1]['Episode'] + 1, stats[-1]['Steps']\n\n done = False\n with tqdm(total=num_episodes, disable=not verbose) as tq:\n for i_episode in range(num_episodes):\n s = env.reset() if reset else (env.s if hasattr(env, \"s\") else env.env.s)\n reward = []\n for _ in itertools.count():\n a = agent.pi(s)\n sp, r, done, _ = env.step(a)\n agent.train(s, a, r, sp, done)\n reward.append(r)\n steps += 1\n if done or steps > max_steps:\n break\n s = sp\n\n stats.append({\"Episode\": i_episode + ep_start,\n \"Accumulated Reward\": sum(reward),\n \"Average Reward\": np.mean(reward),\n \"Length\": len(reward),\n \"Steps\": steps})\n tq.set_postfix(ordered_dict=OrderedDict(stats[-1]))\n tq.update()\n sys.stderr.flush()\n if saveload_model:\n 
agent.save(experiment_name)\n if did_load and save_stats:\n os.rename(recent + \"/log.txt\", recent + \"/log2.txt\") # Shuffle old logs\n\n if experiment_name is not None and save_stats:\n log_time_series(experiment=experiment_name, list_obs=stats)\n print(f\"Training completed. Logging: '{', '.join(stats[0].keys())}' to {experiment_name}\")\n return experiment_name, stats, done\n" } ]
8
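Both Python files in the record above import `defaultdict2` from `irlc.common`; its key-aware default factory is what lets `Agent.__init__` lazily create a per-state Q-value array on first access. A minimal self-contained demo of that behaviour (the state name and four-action shape below are illustrative, not from the repository):

```python
import collections
import inspect
import types

class defaultdict2(collections.defaultdict):
    # As in the record above: a factory that takes exactly one argument is
    # called with the missing key; otherwise plain defaultdict behaviour.
    def __missing__(self, key):
        if self.default_factory is None:
            raise KeyError((key,))
        if isinstance(self.default_factory, types.FunctionType):
            nargs = len(inspect.getfullargspec(self.default_factory).args)
            self[key] = value = self.default_factory(key) if nargs == 1 else self.default_factory()
            return value
        return super().__missing__(key)

Q = defaultdict2(lambda s: [0.0] * 4)  # four zero-initialised Q-values per state
print(Q["s0"])                         # [0.0, 0.0, 0.0, 0.0] -- and "s0" is now stored
```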
chenqi2021/Homework
https://github.com/chenqi2021/Homework
bb547d66abb495849c27c8c697b1c3e46394555c
f963d6dfafb9ad0c6f66ad02f40795115692dc5b
a8468ed27a450bf3aa77033df5ef2dc32e18af38
refs/heads/main
2023-02-25T10:51:05.259051
2021-02-01T13:19:50
2021-02-01T13:19:50
334,953,305
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.48148149251937866, "alphanum_fraction": 0.5899471044540405, "avg_line_length": 32.3636360168457, "blob_id": "cf07df8fd7d879114930dc53ba877bbd32a44a71", "content_id": "f227563abc17cd80dec51aeac34e41f5d8b124a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 426, "license_type": "no_license", "max_line_length": 126, "num_lines": 11, "path": "/Day1/Action2.py", "repo_name": "chenqi2021/Homework", "src_encoding": "UTF-8", "text": "from pandas import Series, DataFrame\r\ndata = {'姓名':['张飞', '关羽', '刘备', '典韦', '许褚'],'语文': [68, 95, 98, 90,80], '数学': [65, 76, 86, 88, 90], '英语': [30, 98, 88, 77, 90]}\r\nprint(data)\r\ndf1 = DataFrame(data)\r\n# df1.set_index('姓名',inplace=True)\r\nprint(df1)\r\nprint(df1.describe())\r\nprint(df1.var())\r\ndf1[\"总分\"] = df1.sum(axis=1)\r\ndf2 = df1.sort_values(\"总分\", ascending=False)\r\nprint(df2)\r\n" } ]
1
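The `Day1/Action2.py` record above builds a scores DataFrame, prints `describe()` and `var()`, appends a row-wise total with `sum(axis=1)`, and sorts by it. On current pandas versions a row-wise sum over a frame that still contains a string column (the student-name column there) needs `numeric_only=True`; a minimal sketch of the same pattern, with illustrative English column names:

```python
from pandas import DataFrame

df = DataFrame({"chinese": [68, 95, 98], "math": [65, 76, 86], "english": [30, 98, 88]})
print(df.describe())                             # count/mean/std/quartiles per column
df["total"] = df.sum(axis=1, numeric_only=True)  # row-wise total per student
print(df.sort_values("total", ascending=False))  # rank by total, descending
```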
SuhrudhSarathy/indbot
https://github.com/SuhrudhSarathy/indbot
88e51990d7aa31d3615dd9a7f11970e6cf6c4e44
08657c9333e40d05adcfffc1e02b14c60d3ee570
ba92ea38c47b6ed69a5f542af40d7d14abab40e3
refs/heads/master
2023-03-06T09:12:29.670033
2020-06-05T05:57:28
2020-06-05T05:57:28
265,811,953
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.48585256934165955, "alphanum_fraction": 0.5063291192054749, "avg_line_length": 31.944786071777344, "blob_id": "3762456041f846597fa4c2189237304dd38d4630", "content_id": "d7ba45d7c690cbbffeee75e8d32b78d438611da7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5372, "license_type": "permissive", "max_line_length": 162, "num_lines": 163, "path": "/indbot_controls/scripts/tests/ppl.py", "repo_name": "SuhrudhSarathy/indbot", "src_encoding": "UTF-8", "text": "\n\nclass Node():\n\n '''\n Node for Astar\n '''\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.distance = np.Inf\n self.hueristic = np.Inf\n self.parent = None\n\nclass PathPlanner():\n\n '''\n Path planner class\n Args:\n start point, goal point\n\n '''\n def __init__(self, start, goal, obstacles):\n\n\n self.start = start\n self.goal = goal\n self.obstacles = obstacles\n self.nodes = [self.start, self.goal]\n self.path = []\n \n def make_nodes(self, resolution = 1):\n \n '''\n make nodes in the specified area\n Args:\n area = [length, width]\n resolution = number of nodes in 1m\n\n call this function everytime the path planner is called\n '''\n\n fig = plt.figure()\n self.ax = fig.add_subplot(111)\n\n CENTER = ((self.start.x + self.goal.x)/2, (self.start.y + self.goal.y)/2)\n dist = np.sqrt((self.start.x - self.goal.x)**2 + (self.start.y - self.goal.y)**2) / 2\n SEMI_AXES = (dist, dist * 1.5)\n ANGLE = np.degrees(np.arctan2(self.goal.x - self.start.x, self.goal.y - self.start.y))\n safety1r = 2\n safety2r = 2\n\n circle = Point(CENTER).buffer(1)\n ell = scale(circle, SEMI_AXES[0], SEMI_AXES[1])\n ell = rotate(ell, 90 - ANGLE)\n safety1 = Point((self.start.x, self.start.y)).buffer(safety1r)\n safety2 = Point((self.goal.x, self.goal.y)).buffer(safety2r)\n structure = cascaded_union([ell, safety1, safety2])\n\n for i in np.arange(-5 - safety1r , int(SEMI_AXES[0]*2 + safety2r + 5), resolution):\n for j in np.arange(-5 - int(SEMI_AXES[1]),int(SEMI_AXES[1] * np.cos(90 - ANGLE) + CENTER[1]) + 5):\n if Point(i, j).within(structure):\n node = Node(i, j)\n self.nodes.append(node)\n self.ax.scatter(node.x, node.y, color='red', alpha=0.5)\n else:\n continue\n \n ## check for the ellipse\n \n \n patch = PolygonPatch(structure, alpha = 0.5, color = 'green')\n self.ax.add_patch(patch)\n\n def transform_nodes(self, nodes):\n '''\n use this function to transform nodes to world frame\n '''\n pass\n\n def plan_path(self):\n self.start.distance = 0\n self.start.hueristic = self.start.distance + np.sqrt((self.start.x - self.goal.x)**2 + (self.start.y - self.goal.y)**2)\n while len(self.nodes) !=0:\n self.nodes = sorted(self.nodes, key=lambda node: node.hueristic)\n current = self.nodes[0]\n\n adjacent_nodes = [node for node in self.nodes if int(np.sqrt((node.x - current.x)**2 + (node.y - current.y)**2)) == 1]\n self.nodes.remove(current)\n \n if current.x == self.goal.x and current.y == self.goal.y:\n self.last = current\n print(\"---path planned---\")\n break\n\n else:\n for node in adjacent_nodes:\n if self.intersection(current, node) == True:\n continue\n else:\n if node.distance > current.distance + np.sqrt((current.x - node.x)**2 + (current.y - node.y)**2):\n \n node.distance = current.distance + np.sqrt((current.x - node.x)**2 + (current.y - node.y)**2)\n node.hueristic = node.distance + np.sqrt((self.goal.x - node.x) **2 + (self.goal.y - node.y) ** 2)\n node.parent = current\n \n\n if node not in self.path:\n self.path.append(node)\n\n\n def 
get_path(self):\n path = [self.goal]\n current = self.goal\n while current.parent != None:\n \n path.append(current.parent)\n current = current.parent\n \n return path\n\n def intersection(self, point1, point2):\n \n for obstacle in self.obstacles:\n if LineString([(p[0], p[1]) for p in obstacle]).intersects(LineString([(point1.x, point1.y), (point2.x, point2.y)])):\n return True\n \n else:\n return False\n \n def main(self, resolution = 1): \n\n return path\n\n \nif __name__ == '__main__':\n '''rospy.init_node('obst_detector')\n rate = rospy.Rate(10)\n \n detector = ObjectDetector()'''\n\n start = Node(0, 0)\n goal = Node(10, 10)\n\n obstacles = []\n\n for i in range(50):\n point = (random.random() * 10, random.random() * 10)\n obstacles.append([(point[0] + 0.5, point[1] + 0.5), (point[0] + 0.5, point[1] - 0.5), (point[0] - 0.5, point[1] - 0.5), (point[0] - 0.5, point[1] + 0.5)])\n \n planner = PathPlanner(start, goal, obstacles)\n planner.make_nodes(0.5)\n planner.plan_path()\n path = planner.get_path()\n planner.transform_nodes(path)\n\n for obstacle in obstacles:\n planner.ax.plot([p[0] for p in obstacle], [p[1] for p in obstacle], color = 'black')\n \n \n planner.ax.plot([p.x for p in path], [p.y for p in path], color='yellow')\n\n planner.ax.scatter([start.x, goal.x], [start.y, goal.y], color = 'green')\n plt.show()\n" }, { "alpha_fraction": 0.6314554214477539, "alphanum_fraction": 0.6431924700737, "avg_line_length": 29.035715103149414, "blob_id": "3868b02f9d9eed1f4d90da94fc6d78fe29ed181f", "content_id": "a6f7a59bc7047e351c8d77145bfc19f767646ad1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 852, "license_type": "permissive", "max_line_length": 106, "num_lines": 28, "path": "/indbot_controls/scripts/transforms.py", "repo_name": "SuhrudhSarathy/indbot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport rospy\nimport tf_conversions\nimport tf2_ros, tf2_geometry_msgs\n\nfrom geometry_msgs.msg import Twist, PointStamped, Point\nfrom std_msgs.msg import Header\n\nif __name__ == '__main__':\n rospy.init_node('check_transforms')\n\n tfBuffer = tf2_ros.Buffer()\n listener = tf2_ros.TransformListener(tfBuffer)\n\n rate = rospy.Rate(10)\n point = PointStamped(header=Header(frame_id = 'odom'), point = Point(2, 2, 0))\n while not rospy.is_shutdown():\n\n try:\n trans = tfBuffer.lookup_transform('base_footprint', 'odom', rospy.Time())\n \n point_transformed = tf2_geometry_msgs.do_transform_point(point, trans)\n print(point_transformed.point.x, point_transformed.point.y, point_transformed.header.frame_id)\n\n except:\n continue\n\n rate.sleep()\n\n \n\n" }, { "alpha_fraction": 0.537898600101471, "alphanum_fraction": 0.5464366674423218, "avg_line_length": 34.76250076293945, "blob_id": "7ab802b1ef53a2e8398e3c89dbdde21ff0a2dae8", "content_id": "9c1712402d9b954d3449c64821ae280fd5bbce60", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5739, "license_type": "permissive", "max_line_length": 126, "num_lines": 160, "path": "/indbot_controls/scripts/turtlebot_controller.py", "repo_name": "SuhrudhSarathy/indbot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport rospy\nfrom nav_msgs.msg import Odometry, Path\nfrom geometry_msgs.msg import Twist, Point, Quaternion, PoseStamped\nfrom tf.transformations import euler_from_quaternion as efq \n\nimport numpy as np \nfrom collections import namedtuple\n\nOrientation = namedtuple('Orientation', ['roll', 
'pitch', 'yaw'])\nGains = namedtuple('Gains', ['kp', 'kd', 'ki'])\n\nDISTMIN = 0.1\nMAXX = 0.22\nMAXANG = 2\nDIST_THRES = 0.1\n\n\nclass Controller():\n '''\n Main controller class\n '''\n\n def __init__(self):\n \n # Define the messages\n self.velocity = Twist()\n self.position = Point()\n self.orientation = Orientation(0, 0, 0)\n self.path = Path()\n self.path_points = []\n self.current_index = 0\n self.nextWay = None\n\n self.goal = None\n self.goal_reached = False\n self.new_goal_recieved = False\n \n # Initialize subs and pubs\n self.vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)\n self.path_sub = rospy.Subscriber('/path', Path, self.__path_sub)\n self.odom_sub = rospy.Subscriber('/odom', Odometry, self.__odom_sub)\n \n\n # Parameters for PID tuning\n self.x_gain = Gains(1, 0, 0)\n self.ang_gain = Gains(0.3, 0, 0)\n self.xdiff, self.angdiff, self.xintegral, self.angintegral = 0, 0, 0, 0\n\n def __odom_sub(self, msg):\n\n # Calbacck function for odomentry\n self.position = msg.pose.pose.position\n quaternion = msg.pose.pose.orientation\n orientatioN = efq([quaternion.x, quaternion.y, quaternion.z, quaternion.w])\n self.orientation = Orientation(orientatioN[0], orientatioN[1], orientatioN[2])\n \n \n \n def __path_sub(self, msg):\n\n #Callback for path \n self.path = msg\n poses = self.path.poses\n self.path_points = [Point(pose.pose.position.x, pose.pose.position.y, pose.pose.position.z) for pose in poses]\n\n # Run the set goal function\n self._set_goal()\n def _set_goal(self):\n '''\n Function that sets goal for controller to move to \n i.e. the next way point \n '''\n try:\n\n self.final_goal = self.path_points[-1]\n if np.sqrt((self.final_goal.x - self.position.x)**2 + (self.position.y - self.final_goal.y)**2) < 0.1:\n rospy.loginfo('final_goal_reached')\n self.goal_reached = True\n self.move_to_goal = False\n self.current_index = 0 \n\n if len(self.path_points) != 0 and not self.goal_reached:\n self.nextWay = self.path_points[self.current_index]\n print((self.nextWay.x, self.nextWay.y), self.current_index)\n if abs(self.nextWay.x - self.position.x) < DISTMIN and abs(self.nextWay.y - self.position.y) < DISTMIN:\n self.move_to_goal = False \n self.current_index += 1\n print('popped')\n else :\n self.goal = self.nextWay\n self.move_to_goal = True\n self._move_bot()\n print('next goal set')\n elif self.goal_reached:\n self.velocity.linear.x, self.velocity.angular.z = 0, 0\n self.vel_pub.publish(self.velocity)\n print('-------Completed Path-------')\n\n except Exception as err:\n rospy.logwarn(err)\n \n \n\n def _move_bot(self):\n '''\n handles all the velocity commands to be published\n '''\n # Calculate all errors\n ang_goal = np.arctan2(self.goal.y - self.position.y, self.goal.x - self.position.x)\n x_error = np.sqrt((self.goal.x - self.position.x) ** 2 + (self.goal.y - self.position.y) ** 2)\n ang_error = ang_goal - self.orientation.yaw\n\n # A simple PID Controller, currently tuned by using just Proportional gain\n \n if self.move_to_goal:\n \n x_error = np.sqrt((self.goal.x - self.position.x) ** 2 + (self.goal.y - self.position.y) ** 2)\n ang_error = ang_goal - self.orientation.yaw\n \n if abs(ang_error) < DIST_THRES:\n ang_vel = self.ang_gain.kp * ang_error + self.ang_gain.kd * self.angdiff + self.ang_gain.ki * self.angintegral\n lin_vel = self.x_gain.kp * x_error + self.x_gain.kd * self.xdiff + self.x_gain.ki * self.xintegral\n ang_vel = max(-MAXANG, min(ang_vel, MAXANG))\n lin_vel = max(0, min(lin_vel, MAXX))\n\n self.xdiff = x_error - self.xdiff\n self.xintegral 
+= x_error\n\n self.angdiff = ang_error - self.angdiff\n self.angintegral += ang_error\n \n else:\n ang_vel = self.ang_gain.kp * ang_error + self.ang_gain.kd * self.angdiff + self.ang_gain.ki * self.angintegral\n ang_vel = max(-MAXANG, min(ang_vel, MAXANG))\n self.angdiff = ang_error - self.angdiff\n self.angintegral += ang_error\n lin_vel = 0\n \n \n self.velocity.linear.x = lin_vel\n self.velocity.angular.z = ang_vel\n self.vel_pub.publish(self.velocity)\n else:\n \n self.velocity.linear.x = 0\n self.velocity.angular.z = 0\n self.vel_pub.publish(self.velocity)\n print('Waypoint Reached')\n\n\nif __name__ == '__main__':\n # Instance of Controller\n rospy.init_node('TurtleBotController')\n rate = rospy.Rate(10)\n rospy.loginfo('Controller Initiated')\n controller = Controller()\n\n rospy.spin()\n \n\n \n\n\n" }, { "alpha_fraction": 0.6845070719718933, "alphanum_fraction": 0.7014084458351135, "avg_line_length": 21.25, "blob_id": "23354789456a6284e35ce95e1abc268bcd660cb4", "content_id": "8aa6c16016e9e96172fbe4f80fe39175bd89c6dc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 355, "license_type": "permissive", "max_line_length": 66, "num_lines": 16, "path": "/indbot_controls/scripts/tests/omni.py", "repo_name": "SuhrudhSarathy/indbot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import TwistStamped\n\nrospy.init_node('controller_test')\nvel_pub = rospy.Publisher('/cmd_vel', TwistStamped, queue_size=10)\nvel = TwistStamped()\nrate = rospy.Rate(10)\n\nwhile not rospy.is_shutdown():\n vel.twist.linear.x = 1 \n vel.twist.linear.y = 1 \n\n vel_pub.publish(vel)\n\n rate.sleep()" }, { "alpha_fraction": 0.5203627347946167, "alphanum_fraction": 0.5523495674133301, "avg_line_length": 34.39765930175781, "blob_id": "141c11f97abb6d2e397aa89c4dd0871330a3ec82", "content_id": "6794bcf89023021c1bf4c0c3da825f88bdf8f773", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6065, "license_type": "permissive", "max_line_length": 160, "num_lines": 171, "path": "/indbot_controls/scripts/rrt.py", "repo_name": "SuhrudhSarathy/indbot", "src_encoding": "UTF-8", "text": "'''\n Basic Implementaion of RRT without any Path Optimiser\n'''\nimport matplotlib.pyplot as plt \nimport numpy as np \nfrom shapely.geometry import Point, LineString, Polygon\nimport random\nimport time\n\n\ndef plot_obstacle(obstacle):\n point_list = obstacle.exterior.coords\n x_list = [p[0] for p in point_list]\n y_list = [p[1] for p in point_list]\n plt.fill(x_list, y_list, color='black')\n\nclass Rectangle():\n def __init__(self, center):\n self.x, self.y = center[0], center[1]\n self.polygon = Polygon([(self.x + 0.25, self.y + 0.25), (self.x - 0.25, self.y + 0.25), (self.x - 0.25, self.y - 0.25), (self.x + 0.25, self.y - 0.25)])\n\ndef generate_random_map(number):\n obstacle_list = []\n for i in range(number):\n rectangle = Rectangle((random.random()*10, random.random()*10))\n obstacle_list.append(rectangle.polygon)\n for obstacle in obstacle_list:\n plot_obstacle(obstacle)\n return obstacle_list\n\ndef obstacles():\n poly1 = Polygon([(2, 10), (7, 10), (7, 1), (6, 1), (6, 6), (4, 6), (4, 9), (2, 9)])\n poly2 = Polygon([(4, 0), (4, 5), (5, 5), (5, 0)])\n poly3 = Polygon([(8, 2), (8, 7), (10, 7), (10, 2)])\n '''cirlce1 = Point(8, 3).buffer(1)\n circle2 = Point(2, 7).buffer(1.5)\n poly4 = Polygon([(11, 10), (11, 13), (12, 13.75), (13, 12)])\n circle3 = Point(10, 
1).buffer(0.75)\n circle4 = Point(11, 1.2).buffer(0.86)\n circle5 = Point(5, 15).buffer(1)\n circle6 = Point(4, 10).buffer(1)\n circle7 = Point(5, 10.7).buffer(1)'''\n obstacle_list = [poly1, poly2, poly3]\n return obstacle_list\n\n\n\ndef collisionCheck(point1, point2, obstacle_list):\n try:\n line = LineString([(point1.x, point1.y), (point2.x, point2.y)])\n except:\n line = LineString([(point1[0], point1[1]), (point2[0], point2[1])])\n intersection = 0\n for obstacle in obstacle_list:\n if line.intersects(obstacle):\n intersection += 1\n else:\n pass\n if intersection == 0:\n collision = False\n else:\n collision = True\n return collision\n\ndef distance(point1, point2):\n distance = np.sqrt((point1.x - point2.x)**2 + (point1.y - point2.y)**2)\n return distance\n\ndef new_vector(point1, point2, threshold):\n vector = Node((point2.x - point1.x), (point2.y - point1.x))\n vector.x, vector.y = vector.x/np.sqrt((vector.x**2 + vector.y**2)), vector.y/np.sqrt((vector.x**2 + vector.y**2))\n vector.x, vector.y = point1.x + vector.x * threshold, point1.y + vector.y * threshold\n return vector\nclass Node():\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.parent = None\n self.distance = 0\nclass RRT():\n\n '''\n Main RRT Class used for path planning\n Args:\n threshold: A threshold for deciding the delta parameter\n max_iter : Maximum number for iterations\n '''\n def __init__(self, threshold, max_iter):\n\n self.threshold = threshold\n self.nodes = []\n self.is_reached = False\n self.max_iter = max_iter\n self.goal_sample_rate = 0.1\n\n def reset(self, max_iter):\n self.nodes = []\n self.is_reached = False\n self.max_iter = max_iter\n self.goal_sample_rate = 0.1\n\n def _start_tree(self, start, goal, obstacle_list, max_iter):\n self.reset(max_iter)\n self.start = start\n self.goal = goal\n self.nodes.append(self.start)\n self.obstacle_list = obstacle_list\n #time1 = time.time()\n while self.max_iter > 0:\n print('planning', self.max_iter)\n new_node = self.generate_random_node()\n \n for node in self.nodes:\n node.distance = distance(node, new_node)\n self.nodes = sorted(self.nodes, key=lambda node: node.distance)\n nearby_node = self.nodes[0]\n for node in self.nodes:\n node.distance = 0 \n if distance(nearby_node, new_node) <= self.threshold:\n if collisionCheck(nearby_node, new_node, self.obstacle_list) == False:\n new_node.parent = nearby_node\n self.nodes.append(new_node)\n else : \n pass\n else:\n new_node = new_vector(nearby_node, new_node, self.threshold)\n if collisionCheck(new_node, nearby_node, self.obstacle_list) == False:\n new_node.parent = nearby_node\n self.nodes.append(new_node)\n else :\n pass\n if new_node.x == self.goal.x and new_node.y == self.goal.y:\n self.is_reached == True\n print('-'*30 + 'Reached' + '-'*30)\n break\n self.max_iter -= 1\n \n\n def plot_points(self):\n plt.scatter([p.x for p in self.nodes], [p.y for p in self.nodes], color='red')\n for node in self.nodes :\n plt.plot([node.x, node.parent.x], [node.y, node.parent.y], color = 'green')\n plt.scatter([self.start.x, self.goal.x], [self.start.y, self.goal.y], color='yellow')\n\n def generate_random_node(self):\n if np.random.random_sample() > self.goal_sample_rate:\n x = np.random.uniform(-5, 10)\n y = np.random.uniform(-5, 10)\n new_node = Node(x, y)\n else:\n new_node = self.goal\n return new_node\n \n def get_path(self, start, goal, obstacle_list, max_iter):\n #print(obstacle_list)\n self._start_tree(start, goal, obstacle_list, max_iter)\n path = [self.nodes[-1]]\n current = self.nodes[-1]\n 
print(len(self.nodes))\n while current.parent != None:\n if current.x == start.x and current.y == start.y:\n path.append(current)\n break\n else:\n path.append(current.parent)\n current = current.parent\n path_planned = [(p.x, p.y, 0) for p in path]\n return path_planned[::-1]\n \n def path_optimiser(self, path, obstacles):\n pass\n\n \n \n\n" }, { "alpha_fraction": 0.5266058444976807, "alphanum_fraction": 0.5345876216888428, "avg_line_length": 30.787878036499023, "blob_id": "78639090048e063abcf4f227766b48249581cd57", "content_id": "902f7a943918cdb655211aadc840faca0b399790", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5262, "license_type": "permissive", "max_line_length": 119, "num_lines": 165, "path": "/indbot_controls/scripts/omni_controller.py", "repo_name": "SuhrudhSarathy/indbot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport rospy\nfrom nav_msgs.msg import Odometry, Path\nfrom geometry_msgs.msg import TwistStamped, Point, Quaternion\nfrom tf.transformations import euler_from_quaternion as efq \n\nimport numpy as np \nfrom collections import namedtuple\n\nOrientation = namedtuple('Orientation', ['roll', 'pitch', 'yaw'])\nGains = namedtuple('Gains', ['kp', 'kd', 'ki'])\n\nDISTMIN = 0.1\nMAXX = 0.25\nMAXY = 0.25\n\nclass Controller():\n '''\n Main controller class\n '''\n\n def __init__(self):\n \n # Define the messages\n self.velocity = TwistStamped()\n self.position = Point()\n self.orientation = Orientation(0, 0, 0)\n self.path = Path()\n self.path_points = []\n self.index = 0\n self.nextWay = None\n self.goal_reached = False\n self.goal = None\n self.current_index = 0\n self.move_to_goal = False\n\n # Initialize subs and pubs\n self.vel_pub = rospy.Publisher('/cmd_vel', TwistStamped, queue_size=10)\n self.path_sub = rospy.Subscriber('/path', Path, self.__path_sub)\n self.odom_sub = rospy.Subscriber('/odom', Odometry, self.__odom_sub)\n\n # Parameters for PID tuning\n self.x_gain = Gains(1, 0, 0)\n self.y_gain = Gains(1, 0, 0)\n\n def __odom_sub(self, msg):\n self.position = msg.pose.pose.position\n quaternion = msg.pose.pose.orientation\n orientatioN = efq([quaternion.x, quaternion.y, quaternion.z, quaternion.w])\n self.orientation = Orientation(orientatioN[0], orientatioN[1], orientatioN[2])\n \n \n \n \n def __path_sub(self, msg):\n self.path = msg\n poses = self.path.poses\n self.path_points = [Point(pose.pose.position.x, pose.pose.position.y, pose.pose.position.z) for pose in poses]\n self.set_goal()\n \n\n def set_goal(self):\n '''\n Sets next waypoint fot the bot to travel to\n '''\n try:\n self.final_goal = self.path_points[-1]\n if np.sqrt((self.final_goal.x - self.position.x)**2 + (self.position.y - self.final_goal.y)**2) < 0.1:\n rospy.loginfo('final_goal_reached')\n self.goal_reached = True\n self.move_to_goal = False\n self.current_index = 0 \n\n if len(self.path_points) != 0 and not self.goal_reached:\n self.nextWay = self.path_points[self.current_index]\n print((self.nextWay.x, self.nextWay.y), self.current_index)\n if abs(self.nextWay.x - self.position.x) < DISTMIN and abs(self.nextWay.y - self.position.y) < DISTMIN:\n self.move_to_goal = False \n self.current_index += 1\n print('popped')\n else :\n self.goal = self.nextWay\n self.move_to_goal = True\n self._move_bot()\n print('next goal set')\n elif self.goal_reached:\n self.velocity.twist.linear.x, self.velocity.twist.angular.z = 0, 0\n self.vel_pub.publish(self.velocity)\n print('-------Completed Path-------')\n\n except 
Exception as err:\n rospy.logwarn(err)\n \n \n def vel_constraint(self, velocity, dir):\n '''\n sets tyhe velocity constraints\n '''\n if dir.lower() == 'x':\n if velocity > MAXX:\n velocity = MAXX\n elif velocity < - MAXX:\n velocity = MAXX\n\n else:\n velocity = velocity\n\n elif dir.lower() == 'y':\n if velocity > MAXY:\n velocity = MAXY\n elif velocity < - MAXY:\n velocity = MAXY\n\n else:\n velocity = velocity\n\n return velocity\n\n def _move_bot(self):\n '''\n handles all the velocity commands to be published\n '''\n x_error = abs(self.position.x - self.nextWay.x)\n y_error = abs(self.position.y - self.nextWay.y)\n\n xdiff, ydiff, xintegral, yintegral = 0, 0, 0, 0\n\n if self.move_to_goal:\n\n x_error = abs(self.position.x - self.nextWay.x)\n y_error = abs(self.position.y - self.nextWay.y) \n\n velx = self.x_gain.kp * x_error + self.x_gain.kd * xdiff + self.x_gain.ki * xintegral\n vely = self.y_gain.kp * y_error + self.y_gain.kd * ydiff + self.y_gain.ki * yintegral\n\n velx, vely = self.vel_constraint(velx, 'x'), self.vel_constraint(vely, 'y')\n\n xdiff = x_error - xdiff\n ydiff = y_error - ydiff\n\n xintegral += x_error\n yintegral += y_error\n self.velocity.twist.linear.x = velx\n self.velocity.twist.linear.y = vely\n\n self.vel_pub.publish(self.velocity)\n print('vel published')\n\n else:\n \n self.velocity.twist.linear.x = 0\n self.velocity.twist.angular.z = 0\n self.vel_pub.publish(self.velocity)\n print('Waypoint Reached')\n\n\nif __name__ == '__main__':\n rospy.init_node('omnicontroller')\n rate = rospy.Rate(10)\n\n controller = Controller()\n \n\n rospy.spin()\n \n\n \n\n\n" }, { "alpha_fraction": 0.5342419743537903, "alphanum_fraction": 0.5419013500213623, "avg_line_length": 37.406925201416016, "blob_id": "f20a2b9d412093e728f16a8ef057d6d7f113e474", "content_id": "e7fcea60bbfff3609dc128b693507e7506d00de1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8878, "license_type": "permissive", "max_line_length": 184, "num_lines": 231, "path": "/indbot_controls/scripts/dynamic_manager.py", "repo_name": "SuhrudhSarathy/indbot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\ntry :\n import rospy \n from nav_msgs.msg import Odometry, Path\n from geometry_msgs.msg import Point, PoseStamped, Quaternion, Pose, Point32, PointStamped\n from sensor_msgs.msg import LaserScan\n from std_msgs.msg import Header\n import tf2_ros\n import tf2_geometry_msgs\n \nexcept:\n raise ImportError\nimport numpy as np \nfrom rrt import RRT, Node\nfrom shapely.geometry import LineString\n\nOBST_THRES = 0.25\n\nclass Manager():\n '''\n Main manager class that takes care of calling the path planner\n Also takes care of updating the nextWay point for the bot to move to in the path\n '''\n\n def __init__(self):\n\n #Initialising messages and variables\n self.position = Point()\n self.path = Path()\n self.obstacles = []\n self.start = Point()\n self.goal = Point()\n self.scan = LaserScan()\n self.goal_recieved = False\n \n\n #Publishers and Subscribers\n self.path_pub = rospy.Publisher('/path', Path, queue_size=10)\n self.odom_sub = rospy.Subscriber('/odom', Odometry, self.__odom_update) \n self.scan_pub = rospy.Subscriber('/scan', LaserScan, self.__laser_sub)\n self.goal_sub = rospy.Subscriber('/move_base_simple/goal', PoseStamped, self.__goal_sub) \n\n #Flag to check if final goal is reached\n self.not_reached = True\n\n #Initialising path planner\n self.path_planner = RRT(1, 1000)\n self.path_points = []\n \n #Initialise transform 
listener\n self.tfBuffer = tf2_ros.Buffer()\n self.listener = tf2_ros.TransformListener(self.tfBuffer)\n\n self.rate = rospy.Rate(10)\n \n def __odom_update(self, msg):\n '''\n Callback function for odometry\n '''\n self.position = Point(msg.pose.pose.position.x, msg.pose.pose.position.y, msg.pose.pose.position.z)\n\n # Obtain Transform\n try:\n self.transform = self.tfBuffer.lookup_transform('odom', 'base_footprint', rospy.Time())\n except:\n rospy.logwarn('Transform not obtained')\n # call get path function\n self.get_path()\n\n def __laser_sub(self, msg):\n '''\n Callback function for laserScan\n '''\n self.angle_min = msg.angle_min\n self.angle_max = msg.angle_max\n self.angle_increment = msg.angle_increment\n self.ranges = msg.ranges\n\n self.convert_to_points()\n self.publish_polygons()\n \n def __goal_sub(self, msg):\n '''\n Callback to recieve goal from RVIZ\n '''\n self.goal = Point(msg.pose.position.x, msg.pose.position.y, 0)\n self.goal_recieved = True\n try:\n rospy.loginfo('Path Planner Called from %f, %f to %f, %f', self.position.x, self.position.y, self.goal.x, self.goal.y)\n self.path_points = self.path_planner.get_path(Node(self.position.x, self.position.y), Node(self.goal.x, self.goal.y), self.linestrings, 1000)\n rospy.loginfo('Path planned from %f, %f to %f, %f', self.position.x, self.position.y, self.goal.x, self.goal.y)\n self.publish_path()\n \n except Exception as err:\n rospy.logerr(err)\n\n###---------------------------------------Navigation Functions----------------------------------------### \n def get_path(self):\n '''\n Funtions that checks necessary conditions and calls path\n '''\n if self.goal_recieved:\n try:\n if abs(self.position.x - self.goal.x) < 0.1 and abs(self.position.y - self.goal.y) < 0.1:\n self.not_reached = False\n except:\n pass\n if (len(self.path_points) == 0 and self.not_reached):\n try:\n rospy.loginfo('Path Planner Called from %f, %f to %f, %f', self.position.x, self.position.y, self.goal.x, self.goal.y)\n self.path_points = self.path_planner.get_path(Node(self.position.x, self.position.y), Node(self.goal.x, self.goal.y), self.linestrings, 1000)\n rospy.loginfo('Path planned from %f, %f to %f, %f', self.position.x, self.position.y, self.goal.x, self.goal.y)\n self.publish_path()\n \n except Exception as err:\n rospy.logerr(err)\n elif self.collision():\n try:\n rospy.loginfo('Path Planner Called')\n self.path_points = self.path_planner.get_path(Node(self.position.x, self.position.y), Node(self.goal.x, self.goal.y), self.linestrings, 1000)\n rospy.loginfo('Path planned from %f, %f to %f, %f', self.position.x, self.position.y, self.goal.x, self.goal.y)\n self.publish_path()\n \n except Exception as err:\n rospy.logerr(err)\n \n elif self.not_reached:\n self.publish_path()\n \n else :\n self.publish_path()\n rospy.loginfo(\"-\"*10 + 'End-Reached' + '-'*10)\n \n\n def publish_path(self):\n '''\n Publishes the path given out byt the path planner\n '''\n poses = []\n self.path = Path()\n self.path.header = Header(frame_id = 'odom')\n for point in self.path_points:\n pose = PoseStamped(header = Header(frame_id = 'odom', stamp = rospy.Time.now()), pose = Pose(position = Point(point[0], point[1], 0), orientation = Quaternion(0, 0, 0, 1)))\n poses.append(pose)\n self.path.poses = poses\n\n self.path_pub.publish(self.path)\n ### ----------------------------- Functions for LaserScan Processing-----------------------------###\n\n def convert_to_points(self):\n '''\n Converts the LaserScan to Points wrt to Bot Frame\n '''\n self.obstacles = []\n 
angle_min = self.angle_min\n angle_increment = self.angle_increment\n current_index = 0\n self.obstacles = [[self.convert_to_rect(self.ranges[0], angle_min)]]\n for i in range(1, len(self.ranges)):\n #Loop through the ranges\n if self.ranges[i] < 1000:\n # If the diff(y_coords) of two points is less than the given threshold the new point belongs to the same obstcale\n if abs(self.convert_to_rect(self.ranges[i], angle_min).y - self.convert_to_rect(self.ranges[i-1], angle_min - angle_increment).y) < OBST_THRES:\n self.obstacles[current_index].append(self.convert_to_rect(self.ranges[i], angle_min))\n else:\n # Else update the current index and associate new point to a new polygon\n self.obstacles.append([])\n current_index += 1\n self.obstacles[current_index].append(self.convert_to_rect(self.ranges[i], angle_min))\n else:\n pass\n \n # Finally add the angle increment \n angle_min += angle_increment\n \n def publish_polygons(self):\n '''\n Publishes Polygons as LineStrings using a very naive algorithm\n '''\n linestrings = []\n for obstacle in self.obstacles:\n try:\n linestring = LineString([(p.x, p.y) for p in obstacle])\n linestrings.append(linestring)\n except:\n pass\n self.linestrings = linestrings \n \n\n def convert_to_rect(self, r, theta):\n '''\n Used to convert from (r, theta) to (x, y)\n '''\n x1 = (r) * np.cos(theta)\n y1 = (r) * np.sin(theta)\n point1 = self.transformedPoint(Point(x1, y1, 0))\n \n #print(point.x, point.y)\n return point1\n\n\n ###----------------------------------------Dealing with transforms--------------------------------------###\n def transformedPoint(self, point):\n pointStamped = PointStamped(header = Header(frame_id = 'base_footprint'), point = Point(point.x, point.y, 0))\n pointTransformed = tf2_geometry_msgs.do_transform_point(pointStamped, self.transform)\n return pointTransformed.point \n\n ###--------------------------------------Dynamic Checking--------------------------------------------------###\n def collision(self):\n #print('checking collision')\n for line in self.linestrings:\n if len(self.path_points) >=2:\n if LineString(self.path_points).intersects(line):\n return True\n else:\n return False\n\n \n\n\nif __name__ == '__main__':\n # Initialise node\n rospy.init_node('dynManager')\n rate = rospy.Rate(10)\n \n # Manager Instance\n manager = Manager()\n\n rospy.loginfo('Manager Initiated')\n rospy.spin()\n\n\n " }, { "alpha_fraction": 0.7048192620277405, "alphanum_fraction": 0.7192770838737488, "avg_line_length": 21.432432174682617, "blob_id": "2cbf2109ba6069b6121db5bcada37894a27078be", "content_id": "8b010c5e78d48e5147fb8ce57dec8cc6ac674407", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 830, "license_type": "permissive", "max_line_length": 92, "num_lines": 37, "path": "/README.md", "repo_name": "SuhrudhSarathy/indbot", "src_encoding": "UTF-8", "text": "# Ind-bot\nAn implementation of a custom navigation stack.\n\nThis is a project done in partial fulfilment of the course, Robotics: Automation and Control\n\n## Installation\n1. Install ros-melodic\n\n2. Install python-catkin-tools\n```\n sudo apt-get install python-catkin-tools\n```\n3. Clone the repository\n```\n # Clone the repo\n cd catkin_ws/src\n git clone https://github.com/SuhrudhSarathy/indbot.git\n\n #Build the workspace\n cd ..\n catkin build\n source devel/setup.bash\n```\n## Usage\n1. Open turtlebot empty world using\n```\n roslaunch turtlebot3_gazebo turtlebot3_empty_world.launch\n```\n2. 
Open rviz using\n```\n roslaunch turtlebot3_gazebo turtlebot3_gazebo_rviz.launch\n```\n3. Run the following launch file\n```\n roslaunch indbot_controls demo.launch\n```\n4. To give a goal to the robot use rviz 2D Nav goal\n" }, { "alpha_fraction": 0.6969696879386902, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 15.5, "blob_id": "5287a4a4e4e7eeb1247e93dfb3c6efb5b5ad95b2", "content_id": "64e7b8414aadfe2fe936f42fe81d04741002d5f6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 33, "license_type": "permissive", "max_line_length": 25, "num_lines": 2, "path": "/TODO.txt", "repo_name": "SuhrudhSarathy/indbot", "src_encoding": "UTF-8", "text": "TO DO:\n1. Add proper README file\n" } ]
9
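Several of the indbot planners above (`rrt.py`, `ppl.py`, `dynamic_manager.py`) reduce collision checking to a Shapely intersection test between a candidate edge and each obstacle geometry. A minimal self-contained sketch of that test, with made-up coordinates:

```python
from shapely.geometry import LineString, Point

obstacle = Point(1.0, 1.0).buffer(0.5)       # disc of radius 0.5 centred at (1, 1)
edge = LineString([(0.0, 0.0), (2.0, 2.0)])  # candidate tree edge

# True here: the segment passes through the disc, so an RRT step using this
# check would reject the edge and sample a new node instead.
print(edge.intersects(obstacle))
```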
bicycleFair/IOT
https://github.com/bicycleFair/IOT
4d09d0d0be59c9a3b0f01c45aac93be2c0063039
6fefc5079e49ee429422f13329019489ce6165db
033ec146abd9cca49df862e155da839f622f8772
refs/heads/master
2021-01-18T15:11:55.908974
2017-03-08T16:41:54
2017-03-08T16:41:54
84,343,264
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.716160237789154, "alphanum_fraction": 0.7320442199707031, "avg_line_length": 33.380950927734375, "blob_id": "fa0da6070ecd8170ac17d3617f713fd743eaabda", "content_id": "8ba5376a04c9f2cea0f5d9b8fadf7296952b9056", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1448, "license_type": "no_license", "max_line_length": 82, "num_lines": 42, "path": "/mosquitto/publisher.py", "repo_name": "bicycleFair/IOT", "src_encoding": "UTF-8", "text": "import paho.mqtt.client as mqtt\nimport ssl,time\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"switch\", help=\"remotely control LED\")\nargs = parser.parse_args()\n\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n\ndef on_publish(client, userdata, msg):\n print(msg.topic + \" \" + str(msg.payload))\n\n#creating a client with client-id=mqtt-test\nclient = mqtt.Client()\nclient.on_connect = on_connect\nmqtt.on_publish = on_publish\n\n#Configure network encryption and authentication options. Enables SSL/TLS support.\n#adding client-side certificates and enabling tlsv1.2 support\n\nclient.tls_set(ca_certs=\"/Users/Joe/Desktop/mosquitto/client2/ca.crt\",\n\t certfile=\"/Users/Joe/Desktop/mosquitto/client2/client.crt\",\n\t keyfile=\"/Users/Joe/Desktop/mosquitto/client2/client.key\",\n cert_reqs=ssl.CERT_REQUIRED,\n \ttls_version=ssl.PROTOCOL_TLSv1_2,\n\t\tciphers=None)\n\n#mqttc.tls_insecure_set(True)\n\n#connecting to\nclient.connect(\"129.63.17.143\", 8883, 60)\n\n# Blocking call that processes network traffic, dispatches callbacks and\n# handles reconnecting.\n# Other loop*() functions are available that give a threaded interface and a\n# manual interface.\n# client.loop_start()\nswitch = args.switch\n(rc, mid) = client.publish(\"encyclopedia/temperature\", switch, qos=1)\n\n\n\n\n" }, { "alpha_fraction": 0.6820428371429443, "alphanum_fraction": 0.6963207125663757, "avg_line_length": 27.421875, "blob_id": "8fd4f5ca1c06e7e18f61dca013aa7b1ed82b41a0", "content_id": "53a1964452eaa22b0cc99e953773bc5d19a254cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1821, "license_type": "no_license", "max_line_length": 124, "num_lines": 64, "path": "/mosquitto/subscriber.py", "repo_name": "bicycleFair/IOT", "src_encoding": "UTF-8", "text": "import paho.mqtt.client as mqtt\nimport RPi.GPIO as GPIO\nimport json, time\nimport ssl\n\npins = [17]\n\n\ndef gpio_setup():\n GPIO.setmode(GPIO.BCM)\n for pin in pins:\n GPIO.setup(pin, GPIO.OUT)\n GPIO.output(pin, GPIO.LOW)\n\ndef gpio_setlow(m):\n for pin in pins:\n if pin != m:\n GPIO.output(pin, GPIO.LOW)\n\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n\n#called when a topic is successfully subscribed to\ndef on_subscribe(mqttc, obj, mid, granted_qos):\n print(\"Subscribed: \"+str(mid)+\" \"+str(granted_qos)+\"data\"+str(obj))\n\ndef on_message(client, userdata, msg):\n print(msg.topic + \" \" + str(msg.payload))\n message = str(msg.payload)\n if message == \"on\":\n GPIO.output(17, True)\n else:\n GPIO.output(17, False)\n\n\n#creating a client with client-id=mqtt-test\nclient = 
mqtt.Client()\nclient.tls_set(ca_certs=\"/home/pi/client/ca.crt\",certfile=\"/home/pi/client/client.crt\",keyfile=\"/home/pi/client/client.key\",\n               cert_reqs=ssl.CERT_REQUIRED,\n    \ttls_version=ssl.PROTOCOL_TLSv1_2,\n\t\tciphers=None)\nclient.on_connect = on_connect\nclient.on_subscribe = on_subscribe\nclient.on_message = on_message\ngpio_setup()\n\n# Configure network encryption and authentication options. Enables SSL/TLS support,\n# adding client-side certificates and enabling TLSv1.2 support.\n\n\n\n#mqttc.tls_insecure_set(True)\n\n# connecting to the broker\nclient.connect(\"129.63.17.141\", 8883, 60)\nclient.subscribe(\"encyclopedia/#\", qos=1)\n# Blocking call that processes network traffic, dispatches callbacks and\n# handles reconnecting.\n# Other loop*() functions are available that give a threaded interface and a\n# manual interface.\nclient.loop_forever()\n\n#gpio_destroy()\n\n\n" } ]
2
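Both files in the IOT record above follow the same paho-mqtt TLS recipe: load client-side certificates with `tls_set`, connect on port 8883, then publish or subscribe with QoS 1. A minimal publisher sketch in the same paho-mqtt 1.x style as the record (broker address and certificate paths are placeholders, not the repository's values):

```python
import ssl
import paho.mqtt.client as mqtt

client = mqtt.Client()  # paho-mqtt 1.x constructor, as used in the record above
client.tls_set(ca_certs="ca.crt", certfile="client.crt", keyfile="client.key",
               cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLSv1_2)
client.connect("broker.example.com", 8883, 60)  # 8883 is the usual MQTT-over-TLS port
client.publish("encyclopedia/temperature", "on", qos=1)
```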
harini9804/OS_Assignment
https://github.com/harini9804/OS_Assignment
f96b23a09c95af11ffba6e5ab931c6d5b37bc05f
7e586eb5e72536d15bb480f38241c3a38c0f507a
fa2e1f365c6eb54773ef9d201c1e827ca954676d
refs/heads/master
2020-04-28T09:06:12.338693
2018-10-30T09:01:51
2018-10-30T09:01:51
175,153,987
0
1
null
2019-03-12T07:04:43
2019-03-12T07:04:00
2018-10-30T09:01:52
null
[ { "alpha_fraction": 0.5104045271873474, "alphanum_fraction": 0.5372422933578491, "avg_line_length": 24.455446243286133, "blob_id": "cf488ecddef2bcf5a2cd423681a0d4671e0b2dc5", "content_id": "c8050307610c692360b218b7f6ce3fd45c199ad7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10284, "license_type": "no_license", "max_line_length": 119, "num_lines": 404, "path": "/sort_package.py", "repo_name": "harini9804/OS_Assignment", "src_encoding": "UTF-8", "text": "'''\nSort Package [Odd Even Sort, Merge Sort, Quick Sort]- Animator.\nDone by:\nHarini R (COE16B018)\nRahul N (CED16I025)\nSaurav Purva (CED16I029)\n'''\nfrom threading import Thread\nimport threading as th\nimport time\nimport thread\nimport numpy as np\n\nimport Queue\n\nimport Tkinter as tk\nimport pygame, sys\nfrom pygame.locals import *\n\nscr_size = (width,height) = (900,600)\nFPS = 0.5\nscreen = pygame.display.set_mode(scr_size)\nclock = pygame.time.Clock()\n\nblack = (0,0,0)\nwhite = (255,255,255)\ngreen = (0,255,0)\nblue = (0,0,255)\nred = (255,0,0)\nyellow = (255,255,0)\n\norange = (255,165,0)\nlight_blue = (110,255,255)\n\n\n\narr_itr = []\nlabels=[]\nindex=0\n\ndef oddEvenSort_thread(arr,out_queue):\n # Initially array is unsorted\n isSorted = 0\n n = len(arr)\n displayarray_oe(arr,2)\n while isSorted == 0:\n isSorted = 1\n temp = 0\n for i in range(1, n-1, 2):\n if arr[i] > arr[i+1]:\n arr[i], arr[i+1] = arr[i+1], arr[i]\n displayarray_oe(arr,2)\n isSorted = 0\n\n for i in range(0, n-1, 2):\n if arr[i] > arr[i+1]:\n arr[i], arr[i+1] = arr[i+1], arr[i]\n displayarray_oe(arr,2)\n isSorted = 0\n\n print \"hello\",arr,\"hello\\n\"\n\n\n\n out_queue.put(arr)\n\n\ndef oddEvenSort(arr):\n# arr = [15,12,70,32,115,24,8,64,16,128,51,52,84,23,96,6,97,3,99,0]\n thr = []\n afterOE = []\n x = len(arr) % 4\n my_queue = Queue.Queue()\n\n displayarray_oe(arr,2)\n\n i=0\n while( i< len(arr) ):\n print \"i is \",i\n thr.append(th.Thread(target=oddEvenSort_thread, args=(arr[i:i+4], my_queue, )))\n i = i+4\n\n # thr.append(th.Thread(target=oddEvenSort,args=(arr[0:4],my_queue,)) )\n # thr.append(th.Thread(target=oddEvenSort,args=(arr[4:8],my_queue,)) )\n # thr.append(th.Thread(target=oddEvenSort,args=(arr[8:12],my_queue,)) )\n # thr.append(th.Thread(target=oddEvenSort,args=(arr[12:16],my_queue,)) )\n # thr.append(th.Thread(target=oddEvenSort,args=(arr[16:20],my_queue,)) )\n\n for thread in thr:\n thread.start()\n x=x+1\n\n for thread in thr:\n ret = thread.join()\n\n while(my_queue.empty() == False ):\n afterOE.extend(my_queue.get())\n print \"afterOE is \", afterOE\n\n mergeSort(afterOE)\n\n print \"after mergesort\", afterOE\n arr = afterOE[:]\n\n if(sorted(arr)== arr):\n print \"SORTED.\"\n sys.exit()\n sorted(arr)\n\n\ndef displayarray_oe(arr,opt):\n basicfont = pygame.font.SysFont(None, 30)\n image = pygame.Surface((width - width/5,height - height/5))\n rect = image.get_rect()\n rect.top = height/10\n rect.left = width/10\n width_per_bar = rect.width/len(arr) - 2\n\n l = 0\n mid = len(arr)/2 -1\n\n const_width = width_per_bar\n\n for k in range(0,rect.width,width_per_bar + 2):\n bar = pygame.Surface((width_per_bar,arr[l]*10))\n bar_rect = bar.get_rect()\n if(opt==1):\n if(l <= mid):\n bar.fill(blue)\n else:\n bar.fill(green)\n if(opt==2):\n if(l % 2 ==0):\n bar.fill(light_blue)\n else:\n bar.fill(orange)\n bar_rect.bottom = rect.height\n bar_rect.left = k\n\n ele_text = basicfont.render(str(arr[l]), True, red)\n ele_textrect = ele_text.get_rect()\n ele_textrect.centerx = 
bar_rect.left + const_width/2\n ele_textrect.centery = bar_rect.bottom - 10\n\n image.blit(bar,bar_rect)\n image.blit(ele_text,ele_textrect)\n l += 1\n if l == len(arr):\n break\n\n str_text = 'Array size: '+str(len(arr))\n text = basicfont.render(str_text, True, red)\n textrect = text.get_rect()\n textrect.centerx = screen.get_rect().centerx\n textrect.centery = height/10\n screen.fill(black)\n screen.blit(image,rect)\n screen.blit(text,textrect)\n pygame.display.update()\n clock.tick(FPS)\n\n\ndef qsort(arr,low,high):\n\n if low < high:\n\n i = low - 1\n pivot = arr[high]\n pi = high\n\n for j in range(low,high):\n\n if arr[j] <=pivot:\n i = i+1\n arr[i],arr[j] = arr[j],arr[i]\n displayarray_qs(arr,j,pi)\n arr[i+1],arr[high] = arr[high],arr[i+1]\n\n pi = i+1\n # print(\"thread {0} is sorting {1} and pivot is {2}\".format(threading.current_thread(), arr[low:high+1], pivot))\n\n\n\n lthread = None\n rthread = None\n\n print \"The array after pivot positioning \", arr\n arr_itr.append([arr[low:pi],arr[pi],arr[pi+1:high+1]])\n displayarray_qs(arr,-1,pi)\n lthread = Thread(target = lambda: qsort(arr,low,pi-1))\n lthread.start()\n\n rthread = Thread(target=lambda: qsort(arr,pi+1,high))\n rthread.start()\n\n if lthread is not None: lthread.join()\n if rthread is not None: rthread.join()\n return arr\n\ndef mergeSort(arr):\n if len(arr)>1:\n mid = len(arr)/2\n lefthalf = arr[:mid]\n righthalf = arr[mid:]\n displayarray_ms(arr,mid-1)\n t1 = th.Thread(target=mergeSort, args=(lefthalf,))\n t2 = th.Thread(target=mergeSort, args=(righthalf,))\n\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n\n i=0\n j=0\n k=0\n\n # print \"lefthalf: \",lefthalf\n # print \"righthalf: \",righthalf\n\n while i<len(lefthalf) and j<len(righthalf):\n # print lefthalf[i],\" \",righthalf[j]\n if lefthalf[i]<righthalf[j]:\n arr[k]=lefthalf[i]\n i=i+1\n k=k+1\n else:\n arr[k]=righthalf[j]\n j=j+1\n k=k+1\n\n while i < len(lefthalf):\n arr[k]=lefthalf[i]\n i=i+1\n k=k+1\n while j < len(righthalf):\n arr[k]=righthalf[j]\n j=j+1\n k=k+1\n displayarray_ms(arr,mid)\n\n\ndef displayarray_ms(arr,mid):\n basicfont = pygame.font.SysFont(None, 30)\n image = pygame.Surface((width - width/5,height - height/5))\n rect = image.get_rect()\n rect.top = height/10\n rect.left = width/10\n width_per_bar = rect.width/len(arr) - 2\n\n # ele_text = []\n # ele_textrect = []\n\n\n\n l = 0\n mid = len(arr)/2 -1\n\n const_width = width_per_bar\n\n for k in range(0,rect.width,width_per_bar + 2):\n bar = pygame.Surface((width_per_bar,arr[l]*10))\n bar_rect = bar.get_rect()\n\n\n\n if(l <= mid):\n bar.fill(blue)\n else:\n bar.fill(green)\n bar_rect.bottom = rect.height\n bar_rect.left = k\n\n ele_text = basicfont.render(str(arr[l]), True, red)\n ele_textrect = ele_text.get_rect()\n ele_textrect.centerx = bar_rect.left + const_width/2\n ele_textrect.centery = bar_rect.bottom - 10\n\n image.blit(bar,bar_rect)\n image.blit(ele_text,ele_textrect)\n l += 1\n if l == len(arr):\n break\n\n\n\n str_text = 'Array size: '+str(len(arr))\n text = basicfont.render(str_text, True, red)\n textrect = text.get_rect()\n textrect.centerx = screen.get_rect().centerx\n textrect.centery = height/10\n screen.fill(black)\n screen.blit(image,rect)\n screen.blit(text,textrect)\n # for i in range(0,len(arr)):\n # screen.blit(ele_text[i],ele_textrect[i])\n pygame.display.update()\n clock.tick(FPS)\n\n\ndef displayarray_qs(arr, hlt, pi):\n basicfont = pygame.font.SysFont(None, 30)\n image = pygame.Surface((width - width/5,height - height/5))\n rect = image.get_rect()\n rect.top 
= height/10\n rect.left = width/10\n width_per_bar = rect.width/len(arr) - 2\n\n l = 0\n const_width = width_per_bar\n\n for k in range(0,rect.width,width_per_bar + 2):\n bar = pygame.Surface((width_per_bar,arr[l]*10))\n bar_rect = bar.get_rect()\n if(l == pi):\n bar.fill(green)\n elif(l==hlt and hlt>0):\n bar.fill(yellow)\n else:\n bar.fill(white)\n bar_rect.bottom = rect.height\n bar_rect.left = k\n\n ele_text = basicfont.render(str(arr[l]), True, red)\n ele_textrect = ele_text.get_rect()\n ele_textrect.centerx = bar_rect.left + const_width/2\n ele_textrect.centery = bar_rect.bottom - 10\n\n image.blit(bar,bar_rect)\n image.blit(ele_text,ele_textrect)\n l += 1\n if l == len(arr):\n break\n\n\n str_text = 'Pivot: '+str(arr[pi])\n text = basicfont.render(str_text, True, red)\n textrect = text.get_rect()\n textrect.centerx = screen.get_rect().centerx\n textrect.centery = height/10\n screen.fill(black)\n screen.blit(image,rect)\n screen.blit(text,textrect)\n pygame.display.update()\n clock.tick(FPS)\n\n\nls = []\noption = input('Enter option 1.Quick Sort 2.Merge Sort 3. Odd Even Sort:')\nprint option\n\nif option not in [1,2,3]:\n option = 1\nn = input('Enter number of elements: ')\nprint n\nprint \"Enter the elements: \"\nfor i in range(0,n):\n x = input()\n ls.append(int(x))\n\nprint ls\npygame.init()\npygame.display.set_caption('Sort')\nwhile True:\n if option == 1:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n if event.type == pygame.KEYDOWN:\n pass\n if event.type == pygame.KEYUP:\n pass\n if sorted(ls) != ls:\n res = qsort(ls, 0, len(ls) - 1)\n else:\n displayarray_qs(ls,0,n-1)\n elif option == 2:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n if event.type == pygame.KEYDOWN:\n pass\n if event.type == pygame.KEYUP:\n pass\n if sorted(ls) != ls:\n mergeSort(ls)\n else:\n displayarray_ms(ls,n/2 - 1)\n\n elif option == 3:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n if event.type == pygame.KEYDOWN:\n pass\n if event.type == pygame.KEYUP:\n pass\n if sorted(ls) != ls:\n print \"Again.\"\n oddEvenSort(ls)\n else:\n print \"DONE!\"\n displayarray_oe(ls, 2)\n" }, { "alpha_fraction": 0.525223970413208, "alphanum_fraction": 0.5530410408973694, "avg_line_length": 24.25, "blob_id": "82ddea16777aa46bbdb5e22642c0bfd6a557835a", "content_id": "3e7aea9ff1b93b3284974c9d3bbfe904b705b80c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2121, "license_type": "no_license", "max_line_length": 118, "num_lines": 84, "path": "/quick_sort_2.py", "repo_name": "harini9804/OS_Assignment", "src_encoding": "UTF-8", "text": "from threading import Thread\nimport threading\nimport time\nimport thread\nimport numpy as np\n\nimport Tkinter as tk\n\narr_itr = []\nlabels=[]\nindex=0\n\ndef qsort(arr,low,high):\n\n if low < high:\n\n i = low - 1\n pivot = arr[high]\n\n for j in range(low,high):\n\n if arr[j] <=pivot:\n i = i+1\n arr[i],arr[j] = arr[j],arr[i]\n arr[i+1],arr[high] = arr[high],arr[i+1]\n\n pi = i+1\n print(\"thread {0} is sorting {1} and pivot is {2}\".format(threading.current_thread(), arr[low:high+1], pivot))\n\n\n\n lthread = None\n rthread = None\n\n print \"The array after pivot positioning \", arr\n arr_itr.append([arr[low:pi],arr[pi],arr[pi+1:high+1]])\n lthread = Thread(target = lambda: qsort(arr,low,pi-1))\n lthread.start()\n\n rthread = Thread(target=lambda: qsort(arr,pi+1,high))\n rthread.start()\n\n if lthread is not None: lthread.join()\n if 
rthread is not None: rthread.join()\n return arr\n\n\n'''testing below'''\nls = [10,5,1,3,6,4,15,9,2,13,8,12,16,7]\nn=len(ls)\nchk_arr = []\nroot = tk.Tk()\nres = qsort(ls, 0, len(ls) - 1)\nfor each in arr_itr:\n print \" --> \",each[0],each[1],each[2], \" \"\n labels.append( tk.Label(root, text=\" \".join( str(each[0]) ),fg=\"blue\" ) )\n labels.append(tk.Label(root, text = \"Pivot is: \"+str(each[1]), fg = \"red\") )\n labels.append(tk.Label(root, text =\" \".join(str(each[2])),fg=\"blue\" ) )\n\ni=0\nwhile(i<len(labels)-1):\n\n labels[i].grid(row=i,column=0)\n labels[i+1].grid(row=i,column=2)\n labels[i+2].grid(row=i,column=4)\n i=i+3\n\n\n # big_arr = arr_itr[i]\n # chk_arr.extend(big_arr[0])\n # chk_arr.append(big_arr[1])\n # chk_arr.extend(big_arr[2])\n # prev_arr = arr_itr[i-1]\n # if np.array_equal(chk_arr,prev_arr[0]):\n # labels[i].grid(row=i,column = 0)\n # col = col/2\n # else:\n # labels[i].grid(row=i,column = col/2+1)\n\nlabel = tk.Label(root,text = \"Sorted array: \"+ \" \".join(str(res)),fg = \"green\" )\nlabel.grid(row=len(labels),column = 2)\n\nroot.mainloop()\nprint(res)\n" }, { "alpha_fraction": 0.6544342637062073, "alphanum_fraction": 0.6788991093635559, "avg_line_length": 22.35714340209961, "blob_id": "fbbdd7001d84a669fb7be7ef75b52329ac50d7ba", "content_id": "b3dc7029008c56d3ac2f8aa97178685bc9ec8021", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 327, "license_type": "no_license", "max_line_length": 64, "num_lines": 14, "path": "/guilabel.py", "repo_name": "harini9804/OS_Assignment", "src_encoding": "UTF-8", "text": "import Tkinter as tk\n\n# if you are still working under a Python 2 version,\n# comment out the previous line and uncomment the following line\n# import Tkinter as tk\n\nroot = tk.Tk()\nitem = [2,5,4,1,3]\nw = tk.Label(root, text=\" \".join(str(item)) )\nw.pack()\nw1 = tk.Label(root, text=\" \".join(str(item)) )\nw1.pack()\n\nroot.mainloop()\n" }, { "alpha_fraction": 0.5716791152954102, "alphanum_fraction": 0.5957913398742676, "avg_line_length": 33.05970001220703, "blob_id": "cf8cc6a92f41c16722b05e5d2a523709541809cf", "content_id": "4f143d222d1bf9d0ee03f5280c94a3b4772d181d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2281, "license_type": "no_license", "max_line_length": 78, "num_lines": 67, "path": "/test.py", "repo_name": "harini9804/OS_Assignment", "src_encoding": "UTF-8", "text": "import random\nfrom Tkinter import *\n\nclass Sorting(Frame):\n def __init__(self):\n Frame.__init__(self)\n self.function = {0:self.bubble, 1:self.quick, 2:self.shell}\n self.master.title(\"Sorting\")\n self.master.rowconfigure(5, weight=1)\n self.master.columnconfigure(5, weight=1)\n self.grid(sticky=W+E+N+S )\n\n #label for sort intro\n self.label1 = Label(self, text=\"Select Sort\", width=25, height=2)\n self.label1.grid(row=0, column=1, sticky=N)\n\n #Radio buttons for sorts\n self.v = IntVar()\n for indx, button in enumerate(('Bubble', 'Quick', 'Shell')):\n name = \"%s Sort\" % button\n button = Radiobutton(self, text=name, variable=self.v, value=indx)\n button.grid(row=1, column=indx, sticky=W+E+N+S)\n button.deselect()\n\n #button to generate number\n self.button4 = Button(self,text='Generate no.',command=self.gen)\n self.button4.grid(row=2, column=1, sticky=W+E+N+S)\n self.rowconfigure(5, weight=1)\n self.columnconfigure(5, weight=1)\n\n def create_but2sort(self):\n self.button5 = Button(self, text='start sorting', command=self.sortit)\n 
self.button5.grid(row=4, column=1, sticky=W+E+N+S)\n self.rowconfigure(5, weight=1 )\n self.columnconfigure(5, weight=1)\n\n def gen(self):\n self.nums = [random.randint(0, 100) for x in range(10)]\n num = ''.join('%4i' % num for num in self.nums)\n self.label2 = Label(self, text=num, width=2, height=2)\n self.label2.grid(row =3, columnspan=10, sticky = W+E+N+S)\n self.create_but2sort()\n\n def sortit(self):\n function = self.function[self.v.get()]\n result = function()\n num = ''.join('%4i' % num for num in result)\n self.label3 = Label(self, text=num, width=2, height=2)\n self.label3.grid(row=5, columnspan=10, sticky=W+E+N+S )\n\n def bubble(self):\n print('bubble to be implemented')\n return sorted(self.nums)\n\n def shell(self):\n print('shell to be implemented')\n return sorted(self.nums)\n\n def quick(self):\n print('quick to be implemented')\n return sorted(self.nums)\n\ndef main():\n Sorting().mainloop()\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5350515246391296, "alphanum_fraction": 0.5587629079818726, "avg_line_length": 23.25, "blob_id": "d323031c9c8c35f6404a03564e77bd23d85ff840", "content_id": "2e64b7de3d2b8e3b824c80dd8c64a9184ae76cc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 970, "license_type": "no_license", "max_line_length": 121, "num_lines": 40, "path": "/quickk_sort_2.py", "repo_name": "harini9804/OS_Assignment", "src_encoding": "UTF-8", "text": "from threading import Thread\nimport threading\nimport time\nimport thread\n\ndef qsort(arr,low,high):\n\n if low < high:\n\n i = low - 1\n pivot = arr[high]\n\n for j in range(low,high):\n\n if arr[j] <=pivot:\n i = i+1\n arr[i],arr[j] = arr[j],arr[i]\n arr[i+1],arr[high] = arr[high],arr[i+1]\n\n pi = i+1\n print(\"thread {0} is sorting {1} and pivot is {2}\".format(threading.current_thread(), arr[low:high+1], pivot))\n lthread = None\n rthread = None\n\n print \"The array after pivot positioning \", arr\n lthread = Thread(target = lambda: qsort(arr,low,pi-1))\n lthread.start()\n\n rthread = Thread(target=lambda: qsort(arr,pi+1,high))\n rthread.start()\n\n if lthread is not None: lthread.join()\n if rthread is not None: rthread.join()\n return arr\n\n\n'''testing below'''\nls = [10,5,1,3,6,4,9,2,8,16,7]\nres = qsort(ls, 0, len(ls) - 1)\nprint(res)\n" }, { "alpha_fraction": 0.5929977893829346, "alphanum_fraction": 0.5995623469352722, "avg_line_length": 14.266666412353516, "blob_id": "8b4d8d5215833282514cdde277eb3b42ace95848", "content_id": "3f3acfdeb5f6b6bae7b780a892e4bc8fd8b340ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 457, "license_type": "no_license", "max_line_length": 41, "num_lines": 30, "path": "/shell_sort.py", "repo_name": "harini9804/OS_Assignment", "src_encoding": "UTF-8", "text": "def shell_sort(arr):\n\tn=len(arr)\n\tgap=n/2\n\twhile gap>0:\n\t\tfor i in range(gap,n):\n\t\t\ttemp=arr[i]\n\n\t\t\tj=i\n\t\t\twhile j>=gap and arr[j-gap]>temp:\n\t\t\t\tarr[j]=arr[j-gap]\n\t\t\t\tj -= gap\n\t\t\tarr[j]=temp\n\n\t\tgap /= 2\n\nn=int(input(\"Enter the size of array: \"))\narr=[]\nfor i in range(n):\n\tx=int(input(\"-> \"))\n\tarr.append(x)\n\nprint(\"Array before sorting \")\nfor i in range(n):\n\tprint(arr[i])\n\nshell_sort(arr)\n\nprint(\"Array after sorting \")\nfor i in range(n):\n\tprint(arr[i])" }, { "alpha_fraction": 0.5321100950241089, "alphanum_fraction": 0.5810397267341614, "avg_line_length": 15.571428298950195, "blob_id": 
"7c55625b5e3d78ab2d5094e377897bb1c794a658", "content_id": "43873f0692e4ef17ffc42264c570d368c03f9fa8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 327, "license_type": "no_license", "max_line_length": 46, "num_lines": 21, "path": "/demo_thread.py", "repo_name": "harini9804/OS_Assignment", "src_encoding": "UTF-8", "text": "import threading as th\n\ndef sum(a,b):\n for i in range(0,5):\n print a+b,\" \"\n\ndef multiply(a,b):\n for i in range(0,5):\n print a*b, \" \"\n\nt1 = th.Thread(target=sum, args=(10,5,));\nt2 = th.Thread(target=multiply, args=(10,5,));\n\nt1.start()\nt2.start()\n\nt1.join()\nt2.join()\n\nprint \"Done\"\nprint \"Hope no errors :D\"\n" }, { "alpha_fraction": 0.49416491389274597, "alphanum_fraction": 0.533001720905304, "avg_line_length": 24.368932723999023, "blob_id": "099837ce8211c98e676892f565ca287e7f2fc40e", "content_id": "b57cb24e1d43f52be021f71e0163e420e237ce45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5227, "license_type": "no_license", "max_line_length": 87, "num_lines": 206, "path": "/odd_even_sort.py", "repo_name": "harini9804/OS_Assignment", "src_encoding": "UTF-8", "text": "\nfrom threading import Thread\nimport threading as th\n\nimport Queue\n\nimport time\nimport thread\nimport numpy as np\n\nimport Tkinter as tk\nimport pygame,sys\nfrom pygame.locals import *\n\nscr_size = (width,height) = (900,600)\nFPS = 0.5\nscreen = pygame.display.set_mode(scr_size)\nclock = pygame.time.Clock()\nblack = (0,0,0)\nwhite = (255,255,255)\ngreen = (0,255,0)\nblue = (0,0,255)\nred = (255,0,0)\n\norange = (255,165,0)\nlight_blue = (110,255,255)\n\npygame.display.set_caption('Odd Even Sort')\n\n\ndef mergeSort(arr):\n if len(arr)>1:\n mid = len(arr)/2\n lefthalf = arr[:mid]\n righthalf = arr[mid:]\n displayarray(arr,1)\n t1 = th.Thread(target=mergeSort, args=(lefthalf,))\n t2 = th.Thread(target=mergeSort, args=(righthalf,))\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n i=0\n j=0\n k=0\n # print \"lefthalf: \",lefthalf\n # print \"righthalf: \",righthalf\n while i<len(lefthalf) and j<len(righthalf):\n # print lefthalf[i],\" \",righthalf[j]\n if lefthalf[i]<righthalf[j]:\n arr[k]=lefthalf[i]\n i=i+1\n k=k+1\n else:\n arr[k]=righthalf[j]\n j=j+1\n k=k+1\n while i < len(lefthalf):\n arr[k]=lefthalf[i]\n i=i+1\n k=k+1\n while j < len(righthalf):\n arr[k]=righthalf[j]\n j=j+1\n k=k+1\n\n displayarray(arr,1)\n\ndef oddEvenSort_thread(arr,out_queue):\n # Initially array is unsorted\n isSorted = 0\n n = len(arr)\n displayarray(arr,2)\n while isSorted == 0:\n isSorted = 1\n temp = 0\n for i in range(1, n-1, 2):\n if arr[i] > arr[i+1]:\n arr[i], arr[i+1] = arr[i+1], arr[i]\n displayarray(arr,2)\n isSorted = 0\n\n for i in range(0, n-1, 2):\n if arr[i] > arr[i+1]:\n arr[i], arr[i+1] = arr[i+1], arr[i]\n displayarray(arr,2)\n isSorted = 0\n\n print \"hello\",arr,\"hello\\n\"\n\n\n\n out_queue.put(arr)\n\n\ndef oddEvenSort(arr):\n# arr = [15,12,70,32,115,24,8,64,16,128,51,52,84,23,96,6,97,3,99,0]\n thr = []\n afterOE = []\n x = len(arr) % 4\n my_queue = Queue.Queue()\n\n displayarray(arr,2)\n\n i=0\n while( i< len(arr) ):\n print \"i is \",i\n thr.append(th.Thread(target=oddEvenSort_thread, args=(arr[i:i+4], my_queue, )))\n i = i+4\n\n # thr.append(th.Thread(target=oddEvenSort,args=(arr[0:4],my_queue,)) )\n # thr.append(th.Thread(target=oddEvenSort,args=(arr[4:8],my_queue,)) )\n # thr.append(th.Thread(target=oddEvenSort,args=(arr[8:12],my_queue,)) )\n # 
thr.append(th.Thread(target=oddEvenSort,args=(arr[12:16],my_queue,)) )\n # thr.append(th.Thread(target=oddEvenSort,args=(arr[16:20],my_queue,)) )\n\n for thread in thr:\n thread.start()\n x=x+1\n\n for thread in thr:\n ret = thread.join()\n\n while(my_queue.empty() == False ):\n afterOE.extend(my_queue.get())\n print \"afterOE is \", afterOE\n\n mergeSort(afterOE)\n\n print \"after mergesort\", afterOE\n arr = afterOE[:]\n\n if(sorted(arr)== arr):\n print \"SORTED.\"\n sys.exit()\n sorted(arr)\n\n\ndef displayarray(arr,opt):\n basicfont = pygame.font.SysFont(None, 30)\n image = pygame.Surface((width - width/5,height - height/5))\n rect = image.get_rect()\n rect.top = height/10\n rect.left = width/10\n width_per_bar = rect.width/len(arr) - 2\n\n l = 0\n mid = len(arr)/2 -1\n\n const_width = width_per_bar\n\n for k in range(0,rect.width,width_per_bar + 2):\n bar = pygame.Surface((width_per_bar,arr[l]*10))\n bar_rect = bar.get_rect()\n if(opt==1):\n if(l <= mid):\n bar.fill(blue)\n else:\n bar.fill(green)\n if(opt==2):\n if(l % 2 ==0):\n bar.fill(light_blue)\n else:\n bar.fill(orange)\n bar_rect.bottom = rect.height\n bar_rect.left = k\n\n ele_text = basicfont.render(str(arr[l]), True, red)\n ele_textrect = ele_text.get_rect()\n ele_textrect.centerx = bar_rect.left + const_width/2\n ele_textrect.centery = bar_rect.bottom - 10\n\n image.blit(bar,bar_rect)\n image.blit(ele_text,ele_textrect)\n l += 1\n if l == len(arr):\n break\n\n str_text = 'Array size: '+str(len(arr))\n text = basicfont.render(str_text, True, red)\n textrect = text.get_rect()\n textrect.centerx = screen.get_rect().centerx\n textrect.centery = height/10\n screen.fill(black)\n screen.blit(image,rect)\n screen.blit(text,textrect)\n pygame.display.update()\n clock.tick(FPS)\n\n'''testing below'''\narr = [100,5,1,3,6,16,4,15,9,30,2,13,8,12,16,7]\nn=len(arr)\npygame.init()\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n if event.type == pygame.KEYDOWN:\n pass\n if event.type == pygame.KEYUP:\n pass\n if sorted(arr) != arr:\n print \"Again.\"\n oddEvenSort(arr)\n else:\n print \"DONE!\"\n displayarray(arr, 2)\n" }, { "alpha_fraction": 0.5424836874008179, "alphanum_fraction": 0.5947712659835815, "avg_line_length": 16, "blob_id": "b9e7cf275007bee359381d1b6e0aa29735ee0d6a", "content_id": "ca23fe46caa43c4a71c95d458549d282694bb6cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 153, "license_type": "no_license", "max_line_length": 27, "num_lines": 9, "path": "/demo_file.py", "repo_name": "harini9804/OS_Assignment", "src_encoding": "UTF-8", "text": "def hello_world(arr):\n for each in arr:\n each = each +2\n return arr\n\n\narr1 = [4,7,8,90]\nret = hello_world(arr1)\nprint ret, \" is after func\"\n" }, { "alpha_fraction": 0.7372134327888489, "alphanum_fraction": 0.7707231044769287, "avg_line_length": 23.65217399597168, "blob_id": "e4f8686833ab5104af58ef48147b725bfde7d169", "content_id": "70d356a3ce560c7e3392201b85bc8e4430ec2991", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 567, "license_type": "no_license", "max_line_length": 107, "num_lines": 23, "path": "/README.md", "repo_name": "harini9804/OS_Assignment", "src_encoding": "UTF-8", "text": "# OS_Assignment\nMultithreading sorting package\n\nWritten in Python2.\nUses pygame 1.9.4 for the animation.\n\nThis package has three sorting implementations available, namely, Odd-Even Sort, Quick Sort and Merge Sort.\n\nRun the 
program with the following command:\npython sort_package.py\n\nIn the CLI:\nEnter the option to pick a sorting algorithm.\n\nThen enter the number of elements followed by the elements.\n\nThe pygame window will open with the animation of the sort.\n\n\nDone by:\nHarini R (COE16B018), Rahul Nenavath (CED16I025), Saurva Purva (CED16I029).\n\nUnder the guidance of Dr Sivaselvan B.\n" }, { "alpha_fraction": 0.5110172033309937, "alphanum_fraction": 0.5424087047576904, "avg_line_length": 23.18248176574707, "blob_id": "fb10053c9b966615836f3bf8cbde0412ac3820bf", "content_id": "93b3fdc809507e8ba781ee4e2187fa84c4db8085", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3313, "license_type": "no_license", "max_line_length": 63, "num_lines": 137, "path": "/merge_thread_pygametext.py", "repo_name": "harini9804/OS_Assignment", "src_encoding": "UTF-8", "text": "from threading import Thread\nimport threading as th\nimport time\nimport thread\nimport numpy as np\n\nimport Tkinter as tk\nimport pygame, sys\nfrom pygame.locals import *\n\nscr_size = (width,height) = (900,600)\nFPS = 0.5\nscreen = pygame.display.set_mode(scr_size)\nclock = pygame.time.Clock()\nblack = (0,0,0)\nwhite = (255,255,255)\nred = (255,0,0)\ngreen = (0,255,0)\nblue = (0,0,255)\n\npygame.display.set_caption('Merge Sort')\n\ndef mergeSort(arr):\n if len(arr)>1:\n mid = len(arr)/2\n lefthalf = arr[:mid]\n righthalf = arr[mid:]\n displayarray(arr,mid-1)\n t1 = th.Thread(target=mergeSort, args=(lefthalf,))\n t2 = th.Thread(target=mergeSort, args=(righthalf,))\n\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n\n i=0\n j=0\n k=0\n\n # print \"lefthalf: \",lefthalf\n # print \"righthalf: \",righthalf\n\n while i<len(lefthalf) and j<len(righthalf):\n # print lefthalf[i],\" \",righthalf[j]\n if lefthalf[i]<righthalf[j]:\n arr[k]=lefthalf[i]\n i=i+1\n k=k+1\n else:\n arr[k]=righthalf[j]\n j=j+1\n k=k+1\n\n while i < len(lefthalf):\n arr[k]=lefthalf[i]\n i=i+1\n k=k+1\n while j < len(righthalf):\n arr[k]=righthalf[j]\n j=j+1\n k=k+1\n displayarray(arr,mid)\n\ndef displayarray(arr,mid):\n basicfont = pygame.font.SysFont(None, 30)\n image = pygame.Surface((width - width/5,height - height/5))\n rect = image.get_rect()\n rect.top = height/10\n rect.left = width/10\n width_per_bar = rect.width/len(arr) - 2\n\n # ele_text = []\n # ele_textrect = []\n\n\n\n l = 0\n mid = len(arr)/2 -1\n\n const_width = width_per_bar\n\n for k in range(0,rect.width,width_per_bar + 2):\n bar = pygame.Surface((width_per_bar,arr[l]*10))\n bar_rect = bar.get_rect()\n\n\n\n if(l <= mid):\n bar.fill(blue)\n else:\n bar.fill(green)\n bar_rect.bottom = rect.height\n bar_rect.left = k\n\n ele_text = basicfont.render(str(arr[l]), True, red)\n ele_textrect = ele_text.get_rect()\n ele_textrect.centerx = bar_rect.left + const_width/2\n ele_textrect.centery = bar_rect.bottom - 10\n\n image.blit(bar,bar_rect)\n image.blit(ele_text,ele_textrect)\n l += 1\n if l == len(arr):\n break\n\n\n\n str_text = 'Array size: '+str(len(arr))\n text = basicfont.render(str_text, True, red)\n textrect = text.get_rect()\n textrect.centerx = screen.get_rect().centerx\n textrect.centery = height/10\n screen.fill(black)\n screen.blit(image,rect)\n screen.blit(text,textrect)\n # for i in range(0,len(arr)):\n # screen.blit(ele_text[i],ele_textrect[i])\n pygame.display.update()\n clock.tick(FPS)\n\n'''testing below'''\narr = [100,5,1,3,6,16,4,15,9,30,2,13,8,12,16,7]\nn=len(arr)\npygame.init()\nwhile True:\n for event in pygame.event.get():\n if 
event.type == pygame.QUIT:\n quit()\n if event.type == pygame.KEYDOWN:\n pass\n if event.type == pygame.KEYUP:\n pass\n if sorted(arr) != arr:\n mergeSort(arr)\n else:\n displayarray(arr,n/2 - 1)\n" }, { "alpha_fraction": 0.41493383049964905, "alphanum_fraction": 0.44612476229667664, "avg_line_length": 26.128204345703125, "blob_id": "487d8f56235d806f27a7e69c89cd8ff90fd0d8f8", "content_id": "2f4b238f70b11afac1cd29217519ec9a7bbccf9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1058, "license_type": "no_license", "max_line_length": 59, "num_lines": 39, "path": "/merge_thread.py", "repo_name": "harini9804/OS_Assignment", "src_encoding": "UTF-8", "text": "import threading as th\ndef mergeSort(arr):\n if len(arr)>1:\n mid = len(arr)/2\n lefthalf = arr[:mid]\n righthalf = arr[mid:]\n t1 = th.Thread(target=mergeSort, args=(lefthalf,))\n t2 = th.Thread(target=mergeSort, args=(righthalf,))\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n i=0\n j=0\n k=0\n # print \"lefthalf: \",lefthalf\n # print \"righthalf: \",righthalf\n while i<len(lefthalf) and j<len(righthalf):\n # print lefthalf[i],\" \",righthalf[j]\n if lefthalf[i]<righthalf[j]:\n arr[k]=lefthalf[i]\n i=i+1\n k=k+1\n else:\n arr[k]=righthalf[j]\n j=j+1\n k=k+1\n while i < len(lefthalf):\n arr[k]=lefthalf[i]\n i=i+1\n k=k+1\n while j < len(righthalf):\n arr[k]=righthalf[j]\n j=j+1\n k=k+1\n # print \"merging\", arr\n #\n# arr = [13,4,1,7,90,24,113,63]\n# mergeSort(arr)\n" }, { "alpha_fraction": 0.5386052131652832, "alphanum_fraction": 0.5700498223304749, "avg_line_length": 23.89922523498535, "blob_id": "bfce8fdc1c667385ec6b1e044406eb8e9a8da2ed", "content_id": "6e5b50254319db9eaafecb670af7452a65052916", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3212, "license_type": "no_license", "max_line_length": 118, "num_lines": 129, "path": "/quick_sort_2_pygame.py", "repo_name": "harini9804/OS_Assignment", "src_encoding": "UTF-8", "text": "from threading import Thread\nimport threading\nimport time\nimport thread\nimport numpy as np\n\nimport Tkinter as tk\nimport pygame, sys\nfrom pygame.locals import *\n\nscr_size = (width,height) = (900,600)\nFPS = 0.5\nscreen = pygame.display.set_mode(scr_size)\nclock = pygame.time.Clock()\nblack = (0,0,0)\nwhite = (255,255,255)\ngreen = (0,255,0)\nred = (255,0,0)\nyellow = (255,255,0)\n\npygame.display.set_caption('Quick Sort')\n\narr_itr = []\nlabels=[]\nindex=0\n\ndef qsort(arr,low,high):\n\n if low < high:\n\n i = low - 1\n pivot = arr[high]\n pi = high\n\n for j in range(low,high):\n\n if arr[j] <=pivot:\n i = i+1\n arr[i],arr[j] = arr[j],arr[i]\n displayarray(arr,j,pi)\n arr[i+1],arr[high] = arr[high],arr[i+1]\n\n pi = i+1\n print(\"thread {0} is sorting {1} and pivot is {2}\".format(threading.current_thread(), arr[low:high+1], pivot))\n\n\n\n lthread = None\n rthread = None\n\n print \"The array after pivot positioning \", arr\n arr_itr.append([arr[low:pi],arr[pi],arr[pi+1:high+1]])\n displayarray(arr,-1,pi)\n lthread = Thread(target = lambda: qsort(arr,low,pi-1))\n lthread.start()\n\n rthread = Thread(target=lambda: qsort(arr,pi+1,high))\n rthread.start()\n\n if lthread is not None: lthread.join()\n if rthread is not None: rthread.join()\n return arr\n\ndef displayarray(arr, hlt, pi):\n basicfont = pygame.font.SysFont(None, 30)\n image = pygame.Surface((width - width/5,height - height/5))\n rect = image.get_rect()\n rect.top = height/10\n rect.left = width/10\n width_per_bar = 
rect.width/len(arr) - 2\n\n l = 0\n const_width = width_per_bar\n\n for k in range(0,rect.width,width_per_bar + 2):\n bar = pygame.Surface((width_per_bar,arr[l]*10))\n bar_rect = bar.get_rect()\n if(l == pi):\n bar.fill(green)\n elif(l==hlt and hlt>0):\n bar.fill(yellow)\n else:\n bar.fill(white)\n bar_rect.bottom = rect.height\n bar_rect.left = k\n\n ele_text = basicfont.render(str(arr[l]), True, red)\n ele_textrect = ele_text.get_rect()\n ele_textrect.centerx = bar_rect.left + const_width/2\n ele_textrect.centery = bar_rect.bottom - 10\n\n image.blit(bar,bar_rect)\n image.blit(ele_text,ele_textrect)\n l += 1\n if l == len(arr):\n break\n\n\n str_text = 'Pivot: '+str(arr[pi])\n text = basicfont.render(str_text, True, red)\n textrect = text.get_rect()\n textrect.centerx = screen.get_rect().centerx\n textrect.centery = height/10\n screen.fill(black)\n screen.blit(image,rect)\n screen.blit(text,textrect)\n pygame.display.update()\n clock.tick(FPS)\n\n'''testing below'''\nls = [100,5,1,3,6,16,4,15,9,30,2,13,8,12,16,7]\nn=len(ls)\npygame.init()\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit()\n if event.type == pygame.KEYDOWN:\n pass\n if event.type == pygame.KEYUP:\n pass\n if sorted(ls) != ls:\n res = qsort(ls, 0, len(ls) - 1)\n else:\n displayarray(ls,0,n-1)\n\n\n\nprint(res)\n" } ]
13
HappyMana/text_to_speech
https://github.com/HappyMana/text_to_speech
cc0dbb6dd51a52149355e7098873c9137043c40c
70288b49ae9c92fcdcf57757464d5580865d6310
2dee8505599de862b3f959b6a9abca5ba4d0cdef
refs/heads/main
2023-03-23T20:02:09.358395
2021-03-23T07:35:57
2021-03-23T07:35:57
350,603,738
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5884892344474792, "alphanum_fraction": 0.5932853817939758, "avg_line_length": 27.95833396911621, "blob_id": "241f08ef277091eec718e1ef41eec123c7ab4337", "content_id": "2c3e2a8158dec891f9fa607dc1d519d5a4afec3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2085, "license_type": "no_license", "max_line_length": 106, "num_lines": 72, "path": "/src/tts_server.py", "repo_name": "HappyMana/text_to_speech", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n#[tts_stdserver.py]\n\nimport roslib\nimport rospy\nfrom text_to_speech.srv import tts, ttsResponse\n\nfrom google.cloud import texttospeech\n\nimport wave\nimport pyaudio\n\nFilename = 'output.wav'\n\nclass tts_server(object):\n\n def __init__(self):\n rospy.init_node('text_to_speech')\n self.srv = rospy.Service('/tts', tts, self.execute)\n rospy.loginfo(\"Ready to texttospeech stdserver\")\n rospy.spin()\n\n def execute(self, data):\n\n client = texttospeech.TextToSpeechClient()\n synthesis_input = texttospeech.SynthesisInput(text=data.sentence)\n voice = texttospeech.VoiceSelectionParams(\n language_code='en-US',\n name='en-US-Wavenet-F',\n #ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL\n )\n audio_config = texttospeech.AudioConfig(\n audio_encoding=texttospeech.AudioEncoding.LINEAR16)\n\n response = client.synthesize_speech(input=synthesis_input, voice=voice, audio_config=audio_config)\n\n with open(Filename, 'wb') as out:\n out.write(response.audio_content)\n print('Audio content written to file ' + Filename)\n\n self.PlayWaveFile()\n return ttsResponse()\n\n def PlayWaveFile(self):\n try:\n wf = wave.open(Filename, \"rb\")\n print(\"Time[s]:\", float(wf.getnframes()) / wf.getframerate())\n except FileNotFoundError:\n print(\"[Error 404] No such file or directory: \" + Filename)\n return\n\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True)\n\n chunk = 1024\n data = wf.readframes(chunk)\n while data != b'':\n stream.write(data)\n data = wf.readframes(chunk)\n stream.stop_stream()\n stream.close()\n p.terminate()\n return\n\n\nif __name__ == '__main__':\n tts_server()\n rospy.spin()\n" }, { "alpha_fraction": 0.782608687877655, "alphanum_fraction": 0.782608687877655, "avg_line_length": 22, "blob_id": "8d85c5f73a712c197375eea1273e6e54a3164ba1", "content_id": "26c33b4c349da6c4ca4ace2a2a7a1ad3686524a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 70, "license_type": "no_license", "max_line_length": 28, "num_lines": 2, "path": "/README.md", "repo_name": "HappyMana/text_to_speech", "src_encoding": "UTF-8", "text": "# text_to_speech\ntext_to_speechのサービスサーバー(未完成)\n" } ]
2
chishs/CSCI2300
https://github.com/chishs/CSCI2300
68d8fcee60ee2ccf526947c9de2155c06b6d2abb
5ad61ad046d41bf216b59c7e3c39a94bf40cb7bf
223a3184071bdf0c4f2e8132f2a9382906e76869
refs/heads/master
2021-01-23T08:44:04.401091
2017-09-06T00:10:13
2017-09-06T00:10:13
102,541,405
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.458156019449234, "alphanum_fraction": 0.4784869849681854, "avg_line_length": 27.58108139038086, "blob_id": "39a27537e0e3b58909a3b1dc76e2c6931e71707d", "content_id": "63b3662a6e82e934267165a332aa8b303d7c241b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2115, "license_type": "no_license", "max_line_length": 63, "num_lines": 74, "path": "/Lab6.py", "repo_name": "chishs/CSCI2300", "src_encoding": "UTF-8", "text": "import heapdict\n\n\ndef dijkstra(G, s):\n dist = [10000000 for _ in range(len(G))]\n prev = [None for _ in range(len(G))]\n dist[s] = 0\n hd = heapdict.heapdict()\n for i in range(0, len(G)):\n hd[i] = dist[i]\n\n while len(hd) != 0:\n u = hd.popitem()[0]\n for i in range(0, len(G[u])):\n v = G[u][i][0]\n if int(dist[v]) > int(dist[u] + G[u][i][1]):\n dist[v] = int(dist[u] + G[u][i][1])\n prev[v] = int(u)\n hd.__setitem__(v, dist[v])\n\n path = []\n for i in range(0, len(prev)):\n string = str(i) + \": \" + str(dist[i]) + \",\"\n if prev[i] is not None:\n j = prev[i]\n path.insert(0, i)\n path.insert(0, j)\n while j != s:\n j = prev[j]\n path.insert(0, j)\n print(string + str(path))\n elif prev[i] is None and i == s:\n path.append(i)\n print(str(i) + \": 0,\" + str(path))\n path = []\n\n\ndef getG(n):\n dataFile = open(n, \"r\")\n num_lines = sum(1 for _ in open(n))\n tempG = [[0 for _ in range(3)] for _ in range(num_lines)]\n graph = [[]]\n for i in range(0, num_lines):\n line = dataFile.readline().strip(\"\\n\")\n tokens = line.split(\" \", 2)\n line.split(\" \", num_lines)\n if len(tokens) > 1:\n for j in range(0, 3):\n tempG[i][j] = int(tokens[j])\n largestNode = calcMax(n)\n for i in range(0, largestNode):\n graph.append([])\n for i in range(0, num_lines):\n graph[(tempG[i][0])].append([tempG[i][1], tempG[i][2]])\n\n return graph\n\n\n# Calculate the number of nodes in a graph\ndef calcMax(n):\n dataFile = open(n, \"r\")\n num_lines = sum(1 for _ in open(n))\n largestNode = 0\n for i in range(0, num_lines):\n line = dataFile.readline().strip(\"\\n\")\n tokens = line.split(\" \", 2)\n if largestNode < int(tokens[0]):\n largestNode = int(tokens[0])\n if largestNode < int(tokens[1]):\n largestNode = int(tokens[1])\n return largestNode\n\n\ndijkstra(getG(\"data.txt\"), 1)\n" }, { "alpha_fraction": 0.48273059725761414, "alphanum_fraction": 0.5083299279212952, "avg_line_length": 19.508333206176758, "blob_id": "e84e94d9a7c5684917161b1ba82092055ecd250f", "content_id": "291229d90305e268d646e0a54da14675cfe6ab30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2461, "license_type": "no_license", "max_line_length": 83, "num_lines": 120, "path": "/Lab2.py", "repo_name": "chishs/CSCI2300", "src_encoding": "UTF-8", "text": "import random\nimport time\n\n\n# Get bit-length of an integer\ndef bit_length(x):\n str = bin(x)\n str = str.lstrip('-0b')\n return len(str)\n\n\n# Generate a random d-digit integer\ndef genRand(d):\n rand = random.randint(10 ** (d - 1), (10 ** d) - 1)\n return rand\n\n\n# Method 1\n# Implementation of the grade school method\n# of multiplication using bit-shifts only.\n\n# noinspection PyShadowingNames\ndef gradeSchoolMult(x, y):\n # type: (int, int) -> int\n sum = 0\n i = 0\n # Base case\n if y == 0:\n return 0\n\n else:\n while y > 0:\n temp = y\n y >>= 1\n # Check to see if the current bit is a 1 or a 0\n if y << 1 != temp:\n # Add to the sum, x shifted by i bits if the current bit of y is on\n sum += x << i\n i += 1\n\n 
return sum\n\n\n# Method 2\n# Implementation of fig 1.1\n\ndef multiply(x, y):\n # Base Case\n if y == 0:\n return 0\n\n z = multiply(x, y >> 1)\n\n temp = y\n y >>= 1\n if y << 1 == temp:\n return z << 1\n else:\n return x + (z << 1)\n\n\n# Method 3\n# Implementation of divide and conquer approach\n\n# noinspection PyShadowingNames\ndef divAndConquerMult(x, y):\n x_bit_len = bit_length(x)\n y_bit_len = bit_length(y)\n\n if x_bit_len > y_bit_len:\n n = x_bit_len\n else:\n n = y_bit_len\n\n if y == 0 or x == 0:\n return 0\n\n if x_bit_len == 1 or y_bit_len == 1:\n if x_bit_len == 1:\n return y\n else:\n return x\n\n m = n >> 1\n\n x_l = x >> m\n mask_x = (1 << m) - 1\n x_r = x & mask_x\n\n mask_y = (1 << m) - 1\n y_l = y >> m\n y_r = y & mask_y\n\n p_1 = divAndConquerMult(x_l, y_l)\n p_2 = divAndConquerMult(x_r, y_r)\n p_3 = divAndConquerMult((x_l + x_r), (y_l + y_r))\n\n return (p_1 << ((n >> 1) << 1)) + ((p_3 - p_1 - p_2) << (n >> 1)) + p_2\n\n\nx = genRand(100000)\ny = genRand(100000)\n\nstart = time.time()\nprint(\"Method One\")\nprint(\"x,y: \" + str(x) + \", \" + str(y))\nprint(\"Product: \" + str(gradeSchoolMult(x, y)))\nprint(\"Time: \" + str(time.time() - start) + \"\\n\")\n\n#start = time.time()\n#print(\"Method Two\")\n#print(\"x,y: \" + str(x) + \", \" + str(y))\n#print(\"Product: \" + str(multiply(x, y)))\n#print(\"Time: \" + str(time.time() - start) + \"\\n\")\n\nstart = time.time()\nprint(\"Method Three\")\nprint(\"x,y: \" + str(x) + \", \" + str(y))\nprint(\"Product: \" + str(divAndConquerMult(x, y)))\nprint(\"Time: \" + str(time.time() - start) + \"\\n\")\n" }, { "alpha_fraction": 0.4923448860645294, "alphanum_fraction": 0.5080580115318298, "avg_line_length": 32.09333419799805, "blob_id": "cdc14b5790135b46ab42cfc9d8d55fa6f92a6b5d", "content_id": "eb7ee9870e27634f57adad858cc4b42f8ae65d7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4964, "license_type": "no_license", "max_line_length": 113, "num_lines": 150, "path": "/Lab9.py", "repo_name": "chishs/CSCI2300", "src_encoding": "UTF-8", "text": "import heapdict\n\n\ndef dijkstra(G, E, s, t):\n dist = [float('inf') for _ in range(len(G))]\n prev = [None for _ in range(len(G))]\n dist[s] = 0\n hd = heapdict.heapdict()\n for i in range(0, len(G)):\n hd[i] = dist[i]\n\n while len(hd) != 0:\n u = hd.popitem()[0]\n for i in range(0, len(G[u])):\n v = G[u][i]\n if E.get((u, v)) is not None and float(dist[v]) > float(dist[u] + E.get((u, v))):\n dist[v] = float(dist[u] + E.get((u, v)))\n prev[v] = int(u)\n hd.__setitem__(v, dist[v])\n path = []\n i = len(prev) - 1\n if i == t:\n if prev[i] is not None:\n j = prev[i]\n path.insert(0, i)\n path.insert(0, j)\n while j != s:\n j = prev[j]\n path.insert(0, j)\n elif prev[i] is None and i == s:\n path.append(i)\n\n return path, dist\n\n\ndef getG(n):\n dataFile = open(n, \"r\")\n num_lines = sum(1 for _ in open(n))\n tempG = [[0 for _ in range(3)] for _ in range(num_lines)]\n graph = [[]]\n for i in range(0, num_lines):\n line = dataFile.readline().strip(\"\\n\")\n tokens = line.split(\" \", 2)\n line.split(\" \", num_lines)\n if len(tokens) > 1:\n for j in range(0, 3):\n tempG[i][j] = int(tokens[j])\n largestNode = calcMaxMin(n)\n for i in range(0, largestNode[0]):\n graph.append([])\n for i in range(0, num_lines):\n if tempG[i][1] and tempG[i][2]:\n graph[(tempG[i][0])].append(tempG[i][1])\n\n return graph\n\n\ndef createEdgeDict(n):\n dataFile = open(n, \"r\")\n num_lines = sum(1 for _ in open(n))\n edgeDict = {}\n for i in 
range(0, num_lines):\n line = dataFile.readline().strip(\"\\n\")\n tokens = line.split(\" \", 2)\n line.split(\" \", num_lines)\n if len(tokens) > 1:\n edgeDict.update({(int(tokens[0]), int(tokens[1])): int(tokens[2])})\n\n return edgeDict\n\n\n# Calculate the number of nodes in a graph\ndef calcMaxMin(n):\n dataFile = open(n, \"r\")\n num_lines = sum(1 for _ in open(n))\n largestNode = 0\n minNode = num_lines\n for i in range(0, num_lines):\n line = dataFile.readline().strip(\"\\n\")\n tokens = line.split(\" \", 2)\n if largestNode < int(tokens[0]):\n largestNode = int(tokens[0])\n if largestNode < int(tokens[1]):\n largestNode = int(tokens[1])\n if minNode > int(tokens[0]):\n minNode = int(tokens[0])\n if minNode > int(tokens[1]):\n minNode = int(tokens[1])\n return largestNode, minNode\n\n\ndef fordFulkerson(G, E, s, t):\n maxFlow = 0\n residual = getG(\"testlab9.txt\")\n residualEdges = createEdgeDict(\"testlab9.txt\")\n minOnPath = float('Inf')\n\n path = dijkstra(residual, residualEdges, s, t)[0]\n while len(path) != 0:\n path = dijkstra(residual, residualEdges, s, t)[0]\n for i in range(0, len(path) - 1):\n if residualEdges.get((path[i], path[i + 1])) < minOnPath:\n minOnPath = residualEdges.get((path[i], path[i + 1]))\n if len(path) > 0:\n maxFlow += minOnPath\n\n for j in range(0, len(path) - 1):\n residualEdges.update({(path[j], path[j + 1]): residualEdges.get((path[j], path[j + 1])) - minOnPath})\n residualEdges.update({(path[j + 1], path[j]): minOnPath})\n\n list_does_contain = next((True for item in residual[path[j + 1]] if item == path[j]), False)\n if not list_does_contain:\n residual[path[j + 1]].append(path[j])\n\n for i in range(0, len(residual)):\n for j in range(0, len(residual[i])):\n if len(residual[i]) > j and residualEdges.get((i, residual[i][j])) == 0:\n residualEdges.update({(i, residual[i][j]): float('inf')})\n residual[i].remove(residual[i][j])\n\n dist1 = dijkstra(residual, E, s, t)[1]\n\n scut = []\n tcut = []\n for i in range(0, len(dist1)):\n if dist1[i] != float('inf'):\n scut.append(i)\n else:\n tcut.append(i)\n print(\"Set S: \" + str(scut))\n print(\"Set V-S: \" + str(tcut))\n print(\"MIN-CUT Edge Weights\")\n edges = createEdgeDict(\"testlab9.txt\")\n sum = 0\n for j in range(0, len(scut)):\n for i in range(0, len(G)):\n if residualEdges.get((i, scut[j])) < float('inf'):\n if residualEdges.get((i, scut[j])) is not None and not scut.__contains__(i):\n print(\"(\" + str(i) + \",\" + \" \" + str(scut[j]) + \")\" + \" : \" + str(edges.get((scut[j], i))))\n sum += edges.get((scut[j], i))\n print(\"SUM: \" + str(sum))\n print(residualEdges)\n return maxFlow\n\n\nmaxMinTuple = calcMaxMin(\"testlab9.txt\")\ns = maxMinTuple[1]\nt = maxMinTuple[0]\n\nprint(\"MAX FLOW: \" + str(fordFulkerson(getG(\"testlab9.txt\"), createEdgeDict(\"testlab9.txt\"), s, t)))\n" }, { "alpha_fraction": 0.503459095954895, "alphanum_fraction": 0.5242138504981995, "avg_line_length": 27.141592025756836, "blob_id": "d8901e62b3ede68d27f65b36ba95e318ef2e177f", "content_id": "4e8c8c56939c0940badf2fdb8edb97417a0f8e5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3180, "license_type": "no_license", "max_line_length": 63, "num_lines": 113, "path": "/Lab4.py", "repo_name": "chishs/CSCI2300", "src_encoding": "UTF-8", "text": "import random\n\nimport numpy\nimport time\n\n\n# Basic n^3 matrix multiplication algorithm\ndef basic_matrix(X, Y):\n A = [[0 for _ in range(len(X))] for _ in range(len(X[0]))]\n\n # Rows\n for i in range(0, 
len(X)):\n # Columns\n for j in range(0, len(X[0])):\n for k in range(0, len(Y[0])):\n A[i][j] += X[i][k] * Y[k][j]\n\n return numpy.array(A)\n\n\ndef strassenMult(X, Y):\n size = len(X) // 2\n\n if len(X) == 1:\n return basic_matrix(X, Y)\n\n A = [[0 for _ in range(size)] for _ in range(size)]\n B = [[0 for _ in range(size)] for _ in range(size)]\n C = [[0 for _ in range(size)] for _ in range(size)]\n D = [[0 for _ in range(size)] for _ in range(size)]\n\n E = [[0 for _ in range(size)] for _ in range(size)]\n F = [[0 for _ in range(size)] for _ in range(size)]\n G = [[0 for _ in range(size)] for _ in range(size)]\n H = [[0 for _ in range(size)] for _ in range(size)]\n\n # Make the n/2 x n/2 sub matrices\n for i in range(size):\n for j in range(size):\n A[i][j] = X[i][j]\n B[i][j] = X[i][j + size]\n C[i][j] = X[i + size][j]\n D[i][j] = X[i + size][j + size]\n\n E[i][j] = Y[i][j]\n F[i][j] = Y[i][j + size]\n G[i][j] = Y[i + size][j]\n H[i][j] = Y[i + size][j + size]\n\n # Calculate P1...P7\n P1 = strassenMult(A, subMatrix(F, H))\n P2 = strassenMult(addMatrix(A, B), H)\n P3 = strassenMult(addMatrix(C, D), E)\n P4 = strassenMult(D, subMatrix(G, E))\n P5 = strassenMult(addMatrix(A, D), addMatrix(E, H))\n P6 = strassenMult(subMatrix(B, D), addMatrix(G, H))\n P7 = strassenMult(subMatrix(A, C), addMatrix(E, F))\n\n # Calculate sub-matrices of the product matrix\n P11 = addMatrix(subMatrix(addMatrix(P5, P4), P2), P6)\n P12 = addMatrix(P1, P2)\n P21 = addMatrix(P3, P4)\n P22 = subMatrix(subMatrix(addMatrix(P1, P5), P3), P7)\n\n P = [[0 for _ in range(len(X))] for _ in range(len(X))]\n\n # Create product matrix using the sub-matrices\n for i in range(size):\n for j in range(size):\n P[i][j] = P11[i][j]\n P[i][size + j] = P12[i][j]\n P[i + size][j] = P21[i][j]\n P[i + size][j + size] = P22[i][j]\n\n return numpy.array(P)\n\n\ndef addMatrix(X, Y):\n A = [[0 for _ in range(len(X))] for _ in range(len(X[0]))]\n for i in range(len(X)):\n for j in range(len(X)):\n # Add corresponding elements in each matrix\n A[i][j] = X[i][j] + Y[i][j]\n return A\n\n\ndef subMatrix(X, Y):\n A = [[0 for _ in range(len(X))] for _ in range(len(X[0]))]\n for i in range(len(X)):\n for j in range(len(X)):\n # Add corresponding elements in each matrix\n A[i][j] = X[i][j] - Y[i][j]\n return A\n\n\nn = 1024\nX = numpy.random.rand(n, n)\nY = numpy.random.rand(n, n)\n# print(X)\n# print(Y)\nstart = time.time()\nnumpy.dot(X, Y)\nprint (\"Numpy: \" + str(time.time() - start))\n\nstart = time.time()\nbasic_matrix(X, Y)\nprint (\"Basic matrix run time: \" + str(time.time() - start))\n\nstart = time.time()\nstrassenMult(X, Y)\nprint (\"Strassen matrix run time: \" + str(time.time() - start))\n\n# print(numpy.dot(X, Y))\n" }, { "alpha_fraction": 0.4621026813983917, "alphanum_fraction": 0.49959251284599304, "avg_line_length": 27.534883499145508, "blob_id": "a3901e9fdb4b76356e0efed623369df2d116826d", "content_id": "b05176d69cb9c89c76abe73e159a4b4a0fbe9b2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2454, "license_type": "no_license", "max_line_length": 97, "num_lines": 86, "path": "/Lab8.py", "repo_name": "chishs/CSCI2300", "src_encoding": "UTF-8", "text": "# Created by Sajiel\n\ndef editDistance(s1, s2):\n m = len(s1)\n n = len(s2)\n # Hold edit distance values for each possible substring\n edit = [[None for _ in range(0, n+1)] for _ in range(0, m+1)]\n # Hold previous direction\n prev = [[None for _ in range(0, n+1)] for _ in range(0, m+1)]\n # Initial conditions\n for i in 
range(0, m+1):\n edit[i][0] = i\n for j in range(0, n+1):\n edit[0][j] = j\n # End initial conditions\n\n # Main loop\n for i in range(1, m+1):\n for j in range(1, n+1):\n diff = 1\n if s1[i-1] == s2[j-1]:\n diff = 0\n nextEdit = minEdit(edit[i - 1][j] + 1, edit[i][j - 1] + 1, edit[i - 1][j - 1] + diff)\n edit[i][j] = nextEdit[0]\n if nextEdit[1] == 0:\n prev[i][j] = \"UP\"\n elif nextEdit[1] == 1:\n prev[i][j] = \"LEFT\"\n elif nextEdit[1] == 2:\n prev[i][j] = \"DIAG\"\n\n alignmentS1 = \"\"\n alignmentS2 = \"\"\n i = m\n j = n\n prevDirection = prev[i][j]\n while prevDirection is not None:\n prevDirection = prev[i][j]\n if prevDirection == \"UP\":\n alignmentS1 += s1[i-1]\n alignmentS2 += \"_\"\n i -= 1\n elif prevDirection == \"LEFT\":\n alignmentS1 += \"_\"\n alignmentS2 += s2[j-1]\n j -= 1\n elif prevDirection == \"DIAG\":\n alignmentS1 += s1[i-1]\n alignmentS2 += s2[j-1]\n i -= 1\n j -= 1\n\n # Handle end conditions where one string is shorter than the other\n if m < n:\n i = n-m\n while i >= 0:\n alignmentS2 += s2[i]\n alignmentS1 += \"_\"\n i -= 1\n if n < m:\n i = m-n\n while i >= 0:\n alignmentS1 += s1[i]\n alignmentS2 += \"_\"\n i -= 1\n print(alignmentS1[::-1])\n print(alignmentS2[::-1])\n print(\"Edit Distance: \" + str(edit[m][n]))\n\n\ndef minEdit(n1, n2, n3):\n if n1 < n2 and n1 < n3:\n previousDirection = 0\n nextEdit = n1\n elif n2 < n1 and n2 < n3:\n previousDirection = 1\n nextEdit = n2\n else:\n previousDirection = 2\n nextEdit = n3\n return nextEdit, previousDirection\n\nX = 'CATAAGCTTCTGACTCTTACCTCCCTCTCTCCTACTCCTGCTCGCATCTGCTATAGTGGAGGCCGGAGCAGGAACAGGTTGAACAG'\nY = 'CGTAGCTTTTTGGTTAATTCCTCCTTCAGGTTTGATGTTGGTAGCAAGCTATTTTGTTGAGGGTGCTGCTCAGGCTGGATGGA'\n\neditDistance(X, Y)\n" }, { "alpha_fraction": 0.5645788311958313, "alphanum_fraction": 0.5680345296859741, "avg_line_length": 22.149999618530273, "blob_id": "60db54b63360873f4eae6c95600211bf7777031d", "content_id": "e9f8dbf1c1c610f3a1ebaf7f75c24e74ca5243f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2315, "license_type": "no_license", "max_line_length": 75, "num_lines": 100, "path": "/Lab7.py", "repo_name": "chishs/CSCI2300", "src_encoding": "UTF-8", "text": "from heapq import *\n\n\nclass Node(object):\n left = None\n right = None\n char = None\n frequency = 0\n\n def __init__(self, ch, f):\n self.char = ch\n self.frequency = f\n\n def setChildren(self, ln, rn):\n self.left = ln\n self.right = rn\n\n # Implement for PQ comparisons\n def __cmp__(self, a):\n return cmp(self.frequency, a.frequency)\n\n\ndef huffman(f):\n # Fill the queue with a node for each letter, frequency combination\n # Set the character of the node\n # Count the number of times the character occurs and set the frequency\n pq = []\n for key in f:\n n = Node(key, f[key])\n pq.append(n)\n\n # Create PQ\n heapify(pq)\n\n while len(pq) > 1:\n # Get left and right children\n l = heappop(pq)\n r = heappop(pq)\n # Create new node with frequency of the sum of children\n n = Node(None, r.frequency + l.frequency)\n n.setChildren(l, r)\n # Add the node to the heap\n heappush(pq, n)\n\n codes = {}\n\n # Start at root\n findEncoding(\"\", pq[0], codes)\n\n return codes\n\n\ndef getFrequencies(n):\n freqs = {}\n # Open file\n # Count characters\n # Update dictionary as new characters are found\n with open(n) as f:\n for line in f:\n for ch in line:\n if freqs.__contains__(ch):\n freqs[ch] += 1\n else:\n freqs.update({ch: 1})\n return freqs\n\n\n# Recursively walk the tree in order to get the 
encoding\ndef findEncoding(path, n, codes):\n # Check to see if there is a character or not associated with this node\n if n.char:\n # Update dictionary with the encoding\n codes[n.char] = path\n else:\n # Walk left\n findEncoding(path + \"0\", n.left, codes)\n # Walk right\n findEncoding(path + \"1\", n.right, codes)\n\n\n# Encode the message using the code dictionary\ndef encodeMessage(code):\n encoding = \"\"\n length = 0\n with open(n) as f:\n for line in f:\n for ch in line:\n encoding += code[ch]\n length += len(code[ch])\n print(\"Length: \" + str(length))\n return encoding\n\n\ninput = \"\"\nn = \"huffman.txt\"\nfreqs = getFrequencies(n)\nprint(freqs)\n\nprint(huffman(freqs))\nprint(encodeMessage(huffman(freqs)))\n" }, { "alpha_fraction": 0.37246376276016235, "alphanum_fraction": 0.5260869860649109, "avg_line_length": 25.538461685180664, "blob_id": "2a7effa017aa8b9bfd4dfe5bb3a78d030af7de16", "content_id": "c30932912b232f2b095a6d275467783ace6022a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1380, "license_type": "no_license", "max_line_length": 120, "num_lines": 52, "path": "/Lab3.py", "repo_name": "chishs/CSCI2300", "src_encoding": "UTF-8", "text": "import math\nimport random\n\n\ndef modexp(x, y, N):\n if y == 0:\n return 1\n z = modexp(x, int(math.floor(y / 2)), N)\n if y % 2 == 0:\n return (z * z) % N\n else:\n return (x * z * z) % N\n\n\ndef primality(N, k):\n rand = []\n isPrime = False\n for x in range(0, k):\n rand.append(0)\n for y in range(0, k):\n rand[y] = random.randint(1, N - 1)\n for i in range(0, len(rand)):\n if modexp(rand[i], N - 1, N) == 1:\n isPrime = True\n else:\n return False\n return isPrime\n\n\ndef primalitycar(N, k):\n rand = []\n yes = 0\n for x in range(0, k):\n rand.append(0)\n for y in range(0, k):\n rand[y] = random.randint(1, N - 1)\n for i in range(0, len(rand)):\n if modexp(rand[i], N - 1, N) == 1:\n yes += 1\n\n return yes\n\ncarmichael = [561, 1105, 1729, 2465, 2821, 6601, 8911, 10585, 15841, 29341, 41041, 46657, 52633, 62745, 63973, 75361,\n 101101, 115921, 126217, 162401, 172081, 188461, 252601, 278545, 294409, 314821, 334153, 340561, 399001,\n 410041, 449065, 488881]\nnum = 12341327\nprint(\"Primality of \" + str(num) + \" is: \" + str(primality(num, 1000)))\n\nfor i in range(0, len(carmichael)):\n print(\n \"Probability of \" + str(carmichael[i]) + \" is: \" + str(float(primalitycar(carmichael[i], 1000)) / float(1000)) +\n \"%\")\n" } ]
7
ssteele/xecutables
https://github.com/ssteele/xecutables
b30d174e79d0832a8d3f2e1fe70979c133fb85a9
4b567ce8276531f1a470f57463b952f1e212e152
50e3558929221e8b051551adaf0caa7c5c383ceb
refs/heads/master
2023-05-25T01:03:24.851054
2023-05-14T18:44:29
2023-05-14T18:44:29
6,356,763
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4814888536930084, "alphanum_fraction": 0.4842754900455475, "avg_line_length": 18.323076248168945, "blob_id": "10332400dd6b59c8f7f4bd1454e6000e2784e8cd", "content_id": "c410ff80afb0a3b3160b6a11e30ac45c4d402adb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 5024, "license_type": "no_license", "max_line_length": 106, "num_lines": 260, "path": "/environments/projects/bs/bs-core.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nsource ${xec}/verify-bash-variables.bash\n\n# validate all variables\nverify_bash_exports bs_core_path bs_core_assets_path bs_core_documentation_path bs_core_tools_path work dt\nbash_exports_valid=$?\n\n# validate all aliases\nverify_bash_aliases\nbash_aliases_valid=$?\n\nif [[ 0 = ${bash_exports_valid} || 0 = ${bash_aliases_valid} ]]; then\n return\nfi\n\nshopt -s expand_aliases\nsource ~/.bashrc\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# SET BS CORE ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# custom site\nenv=${bs_core_path}\n\n# set root\ncd ${env}\nsetR\n\n# set global config and routing\ncd ${gR}/app/config\nsetF\nsetO\n\n# set mysql db folder\ncd ${gR}/app/mysql\nsetD\n\n# set tests\ncd ${gR}/test\nsetT\n\n# set assets folder\ncd ${gR}/web\nsetA\n\n# set vagrant\ngR\nsetV\n\n# set assets\nif [ -d \"${bs_core_assets_path}\" ]; then\n cd ${bs_core_assets_path}\n year=`date \"+%Y\"`\n\n if [ -d $year ]; then\n cd ${year}\n fi\n\n setAA\nfi\n\n# set documentation\ncd ${bs_core_documentation_path}\nsetDD\n\n# set tools\ncd ${bs_core_tools_path}\nsetTT\n\n# set current\ncd ${dt}/bs\nsetCC\n\n# set bundles\nbs_bundles=${gR}/src/BiteSquad\ncd ${bs_bundles}\nsetb\n\n# set current bundle\nif [[ -n \"$1\" ]]; then\n bundle=\"$(tr '[:lower:]' '[:upper:]' <<< ${1:0:1})${1:1}\"\n\n # set bundle root\n cd ${bs_bundles}/${bundle}Bundle\n setr\n\n # set bundle routing or fallback to app/config\n if [[ -d ${gr}/Resources/config ]]; then\n cd ${gr}/Resources/config\n seto\n else\n unsete o\n fi\n\n # set controllers\n if [[ -d ${gr}/Controller ]]; then\n cd ${gr}/Controller\n setc\n else\n unsete c\n fi\n\n # set entities\n if [[ -d ${gr}/Entity ]]; then\n cd ${gr}/Entity\n sete\n else\n unsete e\n fi\n\n # set repositories\n if [[ -d ${gr}/Repository ]]; then\n cd ${gr}/Repository\n setm\n else\n unsete m\n fi\n\n # set services\n if [[ -d ${gr}/Service ]]; then\n cd ${gr}/Service\n sets\n else\n unsete s\n fi\n\n # set views\n if [[ -d ${gr}/Resources/views ]]; then\n cd ${gr}/Resources/views\n setv\n else\n unsete v\n fi\n\n # set js\n if [[ -d ${gr}/Resources/public/js ]]; then\n cd ${gr}/Resources/public/js\n setj\n else\n unsete j\n fi\n\n # set styles\n if [[ -d ${gr}/Resources/public/scss ]]; then\n cd ${gr}/Resources/public/scss\n setx\n elif [[ -d ${gr}/Resources/public/css ]]; then\n cd ${gr}/Resources/public/css\n setx\n else\n unsete x\n fi\n\n # set tests\n if [[ -d ${gr}/Tests ]]; then\n cd ${gr}/Tests\n sett\n else\n unsete t\n fi\n\n secondary_bundle='Core'\n if [[ -n \"$2\" ]]; then\n secondary_bundle=\"$(tr '[:lower:]' '[:upper:]' <<< ${2:0:1})${2:1}\"\n fi\n\n if [[ 'Core' != \"$bundle\" ]]; then\n\n # set secondary bundle root\n cd ${bs_bundles}/${secondary_bundle}Bundle\n setrr\n\n # set secondary bundle 
routing or fallback to app/config\n if [[ -d ${grr}/Resources/config ]]; then\n cd ${grr}/Resources/config\n setoo\n else\n unsete oo\n fi\n\n # set secondary bundle controllers\n if [[ -d ${grr}/Controller ]]; then\n cd ${grr}/Controller\n setcc\n else\n unsete cc\n fi\n\n # set secondary bundle entities\n if [[ -d ${grr}/Entity ]]; then\n cd ${grr}/Entity\n setee\n else\n unsete ee\n fi\n\n # set secondary bundle repositories\n if [[ -d ${grr}/Repository ]]; then\n cd ${grr}/Repository\n setmm\n else\n unsete mm\n fi\n\n # set secondary bundle services\n if [[ -d ${grr}/Service ]]; then\n cd ${grr}/Service\n setss\n else\n unsete ss\n fi\n\n # set secondary bundle views\n if [[ -d ${grr}/Resources/views ]]; then\n cd ${grr}/Resources/views\n setvv\n else\n unsete vv\n fi\n\n # set secondary bundle js\n if [[ -d ${grr}/Resources/public/js ]]; then\n cd ${grr}/Resources/public/js\n setjj\n else\n unsete jj\n fi\n\n # set secondary bundle styles\n if [[ -d ${grr}/Resources/public/scss ]]; then\n cd ${grr}/Resources/public/scss\n setxx\n elif [[ -d ${grr}/Resources/public/css ]]; then\n cd ${grr}/Resources/public/css\n setxx\n else\n unsete xx\n fi\n\n # set secondary bundle tests\n if [[ -d ${grr}/Tests ]]; then\n cd ${grr}/Tests\n settt\n else\n unsete tt\n fi\n\n fi\n\nfi\n\ngR\n" }, { "alpha_fraction": 0.6710526347160339, "alphanum_fraction": 0.6710526347160339, "avg_line_length": 11.666666984558105, "blob_id": "7d6a6cf85f3d3b8db8f09c21d2c8d1b941725f3d", "content_id": "53be7f22dac212d44dd70f03bef46212692325ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 76, "license_type": "no_license", "max_line_length": 31, "num_lines": 6, "path": "/projects/fg/get-ssns.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ntail ${gD}/data/ssns.txt\n${xec}/projects/fg/get-ssn.bash\n\nexit\n" }, { "alpha_fraction": 0.641791045665741, "alphanum_fraction": 0.641791045665741, "avg_line_length": 10.166666984558105, "blob_id": "e73b787004d8de8622f9e324bcd3637464981165", "content_id": "94cf039c32bbcb21f4a2a68a8ca5bb13a0eb3162", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 67, "license_type": "no_license", "max_line_length": 24, "num_lines": 6, "path": "/projects/fg/get-vins.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ntail ${gD}/data/vins.txt\n${xec}/projects/fg/get-vin.bash\n\nexit\n" }, { "alpha_fraction": 0.5519733428955078, "alphanum_fraction": 0.5530850291252136, "avg_line_length": 25.47058868408203, "blob_id": "a8e087250410456fe70c944cee577ef84182d720", "content_id": "d031a5fe65bcc3fe7dbac3cab6530138cb0dbdfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1799, "license_type": "no_license", "max_line_length": 83, "num_lines": 68, "path": "/create-vscode-task.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports='editor_path sa_assets_path sa_main_assets_path sa_nest_assets_path'\nbash_aliases='mm'\n\nif [[ ! -z $bash_exports ]] || [[ ! 
-z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# CREATE VSCODE TASK\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\necho ''\necho 'I should hook this up some day'\necho ''\nexit\n\nboilerplate=\"sa-nest\"\nassets_path=\"${sa_nest_assets_path}\"\n\n# get task id\nif [[ -n \"$1\" ]]; then\n task_id=\"$1\"\nelse\n echo 'No task ID passed in by create_task.bash'\n exit\nfi\n\nif echo ${task_id} | grep -qE '(BE|FE|GN|OM|TA)\\d+$'; then\n\n # copy sublime boilerplate\n cd ${editor_path}/sublime-projects-tasks\n cp boilerplates/${boilerplate}.sublime-project \"${task_id}.sublime-project\"\n cp boilerplates/${boilerplate}.sublime-workspace \"${task_id}.sublime-workspace\"\n ${xec}/rename-sublime-project.pl ${task_id}\n\n # get current year (folder)\n year=$(date +'%Y')\n\n # copy assets boilerplate\n cd ${assets_path}/${year}\n mkdir ${task_id}\n cp -r ${sa_assets_path}/task-template/* ${task_id}\n\n ${xec}/sublime-task.pl ${task_id}\n # n ${bs_core_documentation_path}/proc/dev-update.txt ${task_id}/_notes.txt\n n ${task_id}/_notes.txt\n\nelse\n\n # copy sublime boilerplate\n cd ${editor_path}/sublime-projects-tasks\n cp boilerplates/global.sublime-project \"${task_id}.sublime-project\"\n cp boilerplates/global.sublime-workspace \"${task_id}.sublime-workspace\"\n\n ${xec}/rename-sublime-project.pl ${task_id}\n ${xec}/sublime-task.pl ${task_id}\n\nfi\n\nexit" }, { "alpha_fraction": 0.7642276287078857, "alphanum_fraction": 0.7642276287078857, "avg_line_length": 29.75, "blob_id": "714069252bd99e7719150ccde0d5c57a57027511", "content_id": "e1a8dec28c8267eec10436afe6f1d1eaeff6c431", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 246, "license_type": "no_license", "max_line_length": 101, "num_lines": 8, "path": "/new-ssh.command", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "osascript -e 'tell application \"Terminal\"\n\tactivate\n\ttell application \"System Events\"\n\t\tkeystroke \"n\" using {command down}\n\tend tell\nend tell'\n\nosascript -e 'tell application \"Terminal\" to set current settings of first window to settings set \"SSH\"'\n" }, { "alpha_fraction": 0.5554722547531128, "alphanum_fraction": 0.5599700212478638, "avg_line_length": 14.659340858459473, "blob_id": "2bfaa056d0948fda7f4013526e75e58f5fbd5f51", "content_id": "11121497783cca20ef76b7b693362ff58fc2ee66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1334, "license_type": "no_license", "max_line_length": 69, "num_lines": 91, "path": "/environments/archives/fg/appsite.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nsource ${xec}/verify-bash-variables.bash\n\n# validate all variables\nverify_bash_exports appsite_path phoenix_assets_path work\nbash_exports_valid=$?\n\n# validate all aliases\nverify_bash_aliases\nbash_aliases_valid=$?\n\nif [[ 0 = ${bash_exports_valid} || 0 = ${bash_aliases_valid} ]]; then\n return\nfi\n\nshopt -s expand_aliases\nsource ~/.bashrc\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# SET FG APPSITE ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# custom site\nenv=${appsite_path}\n\n# set root\ncd 
${env}\nsetr\n\n# set routes\ncd ${gr}/app\nseto\n\n# set models\ncd ${go}/models\nsetm\n\n# set views\ncd ${go}/views\nsetv\n\n# set controllers\ncd ${go}/controllers\nsetc\n\n# set db alters\ncd ${go}/database/migrations\nsetd\n\n# set public\ncd ${gr}/public\nsetp\n\n# set compiled js\ncd ${gp}/packages/js\nsetj\nsetjj\n\n# set style\ncd ${gp}/packages/css\nsets\nsetss\n\n# set tests\ncd ${go}/tests\nsett\n\n# set laravel source\ncd ${gr}/vendor/laravel/framework/src/Illuminate\nsetl\n\n# set assets\ncd ${phoenix_assets_path}/2017\nsetA\n\n# set documentation\ncd ${work}/documentation/appsite\nsetD\n\n# set docker\ncd ~/dev_docker\nsetV\n\ngr\n" }, { "alpha_fraction": 0.5175096988677979, "alphanum_fraction": 0.5214007496833801, "avg_line_length": 21.34782600402832, "blob_id": "110dd734754b5e0ca001b48b9c091d592b26f5a4", "content_id": "69277a3d4c126a2b314fa699c46ccbdcabb8eecc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 514, "license_type": "no_license", "max_line_length": 69, "num_lines": 23, "path": "/_bootstrap.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# BOOTSTRAP/VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nsource ${xec}/verify-bash-variables.bash\n\n# validate all variables\nverify_bash_exports $bash_exports\nbash_exports_valid=$?\n\n# validate all aliases\nverify_bash_aliases $bash_aliases\nbash_aliases_valid=$?\n\nif [[ 0 = ${bash_exports_valid} || 0 = ${bash_aliases_valid} ]]; then\n exit\nfi\n\nshopt -s expand_aliases\nsource ~/.bashrc\n" }, { "alpha_fraction": 0.3835951089859009, "alphanum_fraction": 0.3835951089859009, "avg_line_length": 15.659883499145508, "blob_id": "94e8399282b8bf72210e6bbc82e1e3aa9ab1e521", "content_id": "1bd0815cd5f8680921fa659ae852bbcea3b2b592", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 5730, "license_type": "no_license", "max_line_length": 67, "num_lines": 344, "path": "/envs-report.zsh", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/zsh\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nzsh_exports=''\nzsh_aliases=''\n\nif [[ ! -z $zsh_exports ]] || [[ ! 
-z $zsh_aliases ]]; then\n source ${xec}/_bootstrap.zsh\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# REPORT SHELL NAVIGATION VARIABLES\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nenvs=()\n\necho ''\n\nif [ -n \"$gAA\" ]; then\n envs+=( 'AA: '$gAA )\nfi\nif [ -n \"$gBB\" ]; then\n envs+=( 'BB: '$gBB )\nfi\nif [ -n \"$gCC\" ]; then\n envs+=( 'CC: '$gCC )\nfi\nif [ -n \"$gDD\" ]; then\n envs+=( 'DD: '$gDD )\nfi\nif [ -n \"$gEE\" ]; then\n envs+=( 'EE: '$gEE )\nfi\nif [ -n \"$gFF\" ]; then\n envs+=( 'FF: '$gFF )\nfi\nif [ -n \"$gGG\" ]; then\n envs+=( 'GG: '$gGG )\nfi\nif [ -n \"$gHH\" ]; then\n envs+=( 'HH: '$gHH )\nfi\nif [ -n \"$gII\" ]; then\n envs+=( 'II: '$gII )\nfi\nif [ -n \"$gJJ\" ]; then\n envs+=( 'JJ: '$gJJ )\nfi\nif [ -n \"$gKK\" ]; then\n envs+=( 'KK: '$gKK )\nfi\nif [ -n \"$gLL\" ]; then\n envs+=( 'LL: '$gLL )\nfi\nif [ -n \"$gMM\" ]; then\n envs+=( 'MM: '$gMM )\nfi\nif [ -n \"$gNN\" ]; then\n envs+=( 'NN: '$gNN )\nfi\nif [ -n \"$gOO\" ]; then\n envs+=( 'OO: '$gOO )\nfi\nif [ -n \"$gPP\" ]; then\n envs+=( 'PP: '$gPP )\nfi\nif [ -n \"$gQQ\" ]; then\n envs+=( 'QQ: '$gQQ )\nfi\nif [ -n \"$gRR\" ]; then\n envs+=( 'RR: '$gRR )\nfi\nif [ -n \"$gSS\" ]; then\n envs+=( 'SS: '$gSS )\nfi\nif [ -n \"$gTT\" ]; then\n envs+=( 'TT: '$gTT )\nfi\nif [ -n \"$gUU\" ]; then\n envs+=( 'UU: '$gUU )\nfi\nif [ -n \"$gVV\" ]; then\n envs+=( 'VV: '$gVV )\nfi\nif [ -n \"$gWW\" ]; then\n envs+=( 'WW: '$gWW )\nfi\nif [ -n \"$gXX\" ]; then\n envs+=( 'XX: '$gXX )\nfi\nif [ -n \"$gYY\" ]; then\n envs+=( 'YY: '$gYY )\nfi\nif [ -n \"$gZZ\" ]; then\n envs+=( 'ZZ: '$gZZ )\nfi\n\nif [ -n \"$gA\" ]; then\n envs+=( 'A: '$gA )\nfi\nif [ -n \"$gB\" ]; then\n envs+=( 'B: '$gB )\nfi\nif [ -n \"$gC\" ]; then\n envs+=( 'C: '$gC )\nfi\nif [ -n \"$gD\" ]; then\n envs+=( 'D: '$gD )\nfi\nif [ -n \"$gE\" ]; then\n envs+=( 'E: '$gE )\nfi\nif [ -n \"$gF\" ]; then\n envs+=( 'F: '$gF )\nfi\nif [ -n \"$gG\" ]; then\n envs+=( 'G: '$gG )\nfi\nif [ -n \"$gH\" ]; then\n envs+=( 'H: '$gH )\nfi\nif [ -n \"$gI\" ]; then\n envs+=( 'I: '$gI )\nfi\nif [ -n \"$gJ\" ]; then\n envs+=( 'J: '$gJ )\nfi\nif [ -n \"$gK\" ]; then\n envs+=( 'K: '$gK )\nfi\nif [ -n \"$gL\" ]; then\n envs+=( 'L: '$gL )\nfi\nif [ -n \"$gM\" ]; then\n envs+=( 'M: '$gM )\nfi\nif [ -n \"$gN\" ]; then\n envs+=( 'N: '$gN )\nfi\nif [ -n \"$gO\" ]; then\n envs+=( 'O: '$gO )\nfi\nif [ -n \"$gP\" ]; then\n envs+=( 'P: '$gP )\nfi\nif [ -n \"$gQ\" ]; then\n envs+=( 'Q: '$gQ )\nfi\nif [ -n \"$gR\" ]; then\n envs+=( 'R: '$gR )\nfi\nif [ -n \"$gS\" ]; then\n envs+=( 'S: '$gS )\nfi\nif [ -n \"$gT\" ]; then\n envs+=( 'T: '$gT )\nfi\nif [ -n \"$gU\" ]; then\n envs+=( 'U: '$gU )\nfi\nif [ -n \"$gV\" ]; then\n envs+=( 'V: '$gV )\nfi\nif [ -n \"$gW\" ]; then\n envs+=( 'W: '$gW )\nfi\nif [ -n \"$gX\" ]; then\n envs+=( 'X: '$gX )\nfi\nif [ -n \"$gY\" ]; then\n envs+=( 'Y: '$gY )\nfi\nif [ -n \"$gZ\" ]; then\n envs+=( 'Z: '$gZ )\nfi\n\nif [ -n \"$gaa\" ]; then\n envs+=( 'aa: '$gaa )\nfi\nif [ -n \"$gbb\" ]; then\n envs+=( 'bb: '$gbb )\nfi\nif [ -n \"$gcc\" ]; then\n envs+=( 'cc: '$gcc )\nfi\nif [ -n \"$gdd\" ]; then\n envs+=( 'dd: '$gdd )\nfi\nif [ -n \"$gee\" ]; then\n envs+=( 'ee: '$gee )\nfi\nif [ -n \"$gff\" ]; then\n envs+=( 'ff: '$gff )\nfi\nif [ -n \"$ggg\" ]; then\n envs+=( 'gg: '$ggg )\nfi\nif [ -n \"$ghh\" ]; then\n envs+=( 'hh: '$ghh )\nfi\nif [ -n \"$gii\" ]; then\n envs+=( 'ii: '$gii )\nfi\nif [ -n \"$gjj\" ]; then\n envs+=( 'jj: '$gjj )\nfi\nif [ -n \"$gkk\" ]; then\n envs+=( 'kk: '$gkk )\nfi\nif [ -n \"$gll\" ]; 
then\n envs+=( 'll: '$gll )\nfi\nif [ -n \"$gmm\" ]; then\n envs+=( 'mm: '$gmm )\nfi\nif [ -n \"$gnn\" ]; then\n envs+=( 'nn: '$gnn )\nfi\nif [ -n \"$goo\" ]; then\n envs+=( 'oo: '$goo )\nfi\nif [ -n \"$gpp\" ]; then\n envs+=( 'pp: '$gpp )\nfi\nif [ -n \"$gqq\" ]; then\n envs+=( 'qq: '$gqq )\nfi\nif [ -n \"$grr\" ]; then\n envs+=( 'rr: '$grr )\nfi\nif [ -n \"$gss\" ]; then\n envs+=( 'ss: '$gss )\nfi\nif [ -n \"$gtt\" ]; then\n envs+=( 'tt: '$gtt )\nfi\nif [ -n \"$guu\" ]; then\n envs+=( 'uu: '$guu )\nfi\nif [ -n \"$gvv\" ]; then\n envs+=( 'vv: '$gvv )\nfi\nif [ -n \"$gww\" ]; then\n envs+=( 'ww: '$gww )\nfi\nif [ -n \"$gxx\" ]; then\n envs+=( 'xx: '$gxx )\nfi\nif [ -n \"$gyy\" ]; then\n envs+=( 'yy: '$gyy )\nfi\nif [ -n \"$gzz\" ]; then\n envs+=( 'zz: '$gzz )\nfi\n\nif [ -n \"$ga\" ]; then\n envs+=( 'a: '$ga )\nfi\nif [ -n \"$gb\" ]; then\n envs+=( 'b: '$gb )\nfi\nif [ -n \"$gc\" ]; then\n envs+=( 'c: '$gc )\nfi\nif [ -n \"$gd\" ]; then\n envs+=( 'd: '$gd )\nfi\nif [ -n \"$ge\" ]; then\n envs+=( 'e: '$ge )\nfi\nif [ -n \"$gf\" ]; then\n envs+=( 'f: '$gf )\nfi\nif [ -n \"$gg\" ]; then\n envs+=( 'g: '$gg )\nfi\nif [ -n \"$gh\" ]; then\n envs+=( 'h: '$gh )\nfi\nif [ -n \"$gi\" ]; then\n envs+=( 'i: '$gi )\nfi\nif [ -n \"$gj\" ]; then\n envs+=( 'j: '$gj )\nfi\nif [ -n \"$gk\" ]; then\n envs+=( 'k: '$gk )\nfi\nif [ -n \"$gl\" ]; then\n envs+=( 'l: '$gl )\nfi\nif [ -n \"$gm\" ]; then\n envs+=( 'm: '$gm )\nfi\nif [ -n \"$gn\" ]; then\n envs+=( 'n: '$gn )\nfi\nif [ -n \"$go\" ]; then\n envs+=( 'o: '$go )\nfi\nif [ -n \"$gp\" ]; then\n envs+=( 'p: '$gp )\nfi\nif [ -n \"$gq\" ]; then\n envs+=( 'q: '$gq )\nfi\nif [ -n \"$gr\" ]; then\n envs+=( 'r: '$gr )\nfi\nif [ -n \"$gs\" ]; then\n envs+=( 's: '$gs )\nfi\nif [ -n \"$gt\" ]; then\n envs+=( 't: '$gt )\nfi\nif [ -n \"$gu\" ]; then\n envs+=( 'u: '$gu )\nfi\nif [ -n \"$gv\" ]; then\n envs+=( 'v: '$gv )\nfi\nif [ -n \"$gw\" ]; then\n envs+=( 'w: '$gw )\nfi\nif [ -n \"$gx\" ]; then\n envs+=( 'x: '$gx )\nfi\nif [ -n \"$gy\" ]; then\n envs+=( 'y: '$gy )\nfi\nif [ -n \"$gz\" ]; then\n envs+=( 'z: '$gz )\nfi\n\nfor env in \"${envs[@]}\"; do\n echo $env | sed -e \"s|${HOME}|~|\"\ndone\n\necho ''" }, { "alpha_fraction": 0.2979066073894501, "alphanum_fraction": 0.2995169162750244, "avg_line_length": 18.40625, "blob_id": "9115893d231c777b71f579bf53c9819562ff2044", "content_id": "1a35d4ca9c1d37ee4aa4feee403cbc777cec9a32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 621, "license_type": "no_license", "max_line_length": 67, "num_lines": 32, "path": "/copy.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports=''\nbash_aliases=''\n\nif [[ ! -z $bash_exports ]] || [[ ! 
-z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# COPY PRESENT WORKING DIRECTORY\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ncmd=''\n\nif [[ -z \"$1\" ]]; then\n cmd='pwd'\nelse\n for arg in \"$@\"; do\n cmd=\"${cmd} ${arg}\"\n done\nfi\n\n${cmd} | pbcopy\n\nexit\n" }, { "alpha_fraction": 0.4900497496128082, "alphanum_fraction": 0.5074626803398132, "avg_line_length": 27.714284896850586, "blob_id": "df51498ca11306eb525ad738fae83b6b6b31e83f", "content_id": "104dbd6dd1240c07bc7f9cb4e0a3ff56f0421c73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 402, "license_type": "no_license", "max_line_length": 85, "num_lines": 14, "path": "/open-macvim-tab.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nlastArg=\"${@: -1}\"\n\nif [[ $lastArg =~ ^-?[0-9]+$ ]]; then\n fileNames=${@:1:$#-1}\n mvim --servername VIM${lastArg} --remote-tab ${fileNames}\nelif [[ \"$vimwin\" ]]; then\n fileNames=${@:1} # export vimwin=1\n mvim --servername VIM${vimwin} --remote-tab ${fileNames}\nelse\n fileNames=\"$@\"\n mvim --remote-tab-silent ${fileNames}\nfi\n" }, { "alpha_fraction": 0.457092821598053, "alphanum_fraction": 0.45971977710723877, "avg_line_length": 24.377777099609375, "blob_id": "2450ccf74ffaeec88b7a90f6573f225f9f37fe3d", "content_id": "f856bf172e699cbaa8776011f272bffbdc22ff63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1142, "license_type": "no_license", "max_line_length": 374, "num_lines": 45, "path": "/archives/unsetenv.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports=''\nbash_aliases=''\n\nif [[ ! -z $bash_exports ]] || [[ ! 
-z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# UNSET SHELL NAVIGATION VARIABLES\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nif [[ -n \"$1\" ]]; then\n\n envs=(\"g$1\")\n msg=\" $1 variable cleared\"\n\nelse\n\n envs=(ga gA gaa gAA gb gB gbb gBB gc gC gcc gCC gd gD gdd gDD ge gE gee gEE gf gF gff gFF gg gG ggg gGG gh gH ghh gHH gi gI gii gII gj gJ gjj gJJ gk gK gkk gKK gl gL gll gLL gm gM gmm gMM gn gN gnn gNN go gO goo gOO gp gP gpp gPP gq gQ gqq gQQ gr gR grr gRR gs gS gss gSS gt gT gtt gTT gu gU guu gUU gv gV gvv gVV gw gW gww gWW gx gX gxx gXX gy gY gyy gYY gz gZ gzz gZZ)\n msg=\" ...environment variables cleared\"\n\nfi\n\nfor env in \"${envs[@]}\"; do\n\n if [ -n \"${!env}\" ]; then\n\n unalias ${env}\n unset ${env}\n\n fi\n\ndone\n\necho ''\necho ${msg}\necho ''\n" }, { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 6.800000190734863, "blob_id": "a8d4a8605d7645c8156b89197b221d6a3e52d7dd", "content_id": "318e37d334d165712c40df5b3859d631b1dde8ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 39, "license_type": "no_license", "max_line_length": 19, "num_lines": 5, "path": "/projects/bs/get-gift-card-code.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho 'WOW' | pbcopy\n\nexit\n" }, { "alpha_fraction": 0.6502732038497925, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 9.764705657958984, "blob_id": "c043c4017bdb4263e58570cd6c0077556d3d3825", "content_id": "0a636b323ed7ae7de68f90b29cf11a99efbe75f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 366, "license_type": "no_license", "max_line_length": 41, "num_lines": 34, "path": "/environments/archives/fg/server-brew.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# custom path (hardcoded)\nenv='phoenix_beta'\n\n# set root\ncd ${ll}/sites/${env}\nsetr\n\n# set project config\ncd ${gr}/application/config/\nseth\n\n# set etc\ncd /usr/local/etc/\nsete\n\n# set nginx\ncd ${ge}/nginx/\nsetn\n\n# set php\ncd ${ge}/php/5.5/\nsetp\n\n# set php extensions\ncd ${gp}/conf.d/\nsetpp\n\n# set assets\ncd ~/work/assets/phoenix_beta/2015/setup/\nsetA\n\ngr\n" }, { "alpha_fraction": 0.44823530316352844, "alphanum_fraction": 0.4505882263183594, "avg_line_length": 20.274999618530273, "blob_id": "d9aa34e3c4809521a9db5b264f6f8eee81ec6050", "content_id": "8e1bd2b674bea4eb92699eebc2c69c873e79094e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 850, "license_type": "no_license", "max_line_length": 94, "num_lines": 40, "path": "/projects/fg/phoenix-db-dump.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nsource ${xec}/verify-bash-variables.bash\n\n# validate all variables\nverify_bash_exports gr\nbash_exports_valid=$?\n\n# validate all aliases\nverify_bash_aliases\nbash_aliases_valid=$?\n\nif [[ 0 = ${bash_exports_valid} || 0 = ${bash_aliases_valid} ]]; then\n exit\nfi\n\nshopt -s expand_aliases\nsource ~/.bashrc\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# DUMP LOS DB\n# 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nhost='fg_mysql'\nuser='root'\npassword='financegenius'\ndatabase='fg_los'\n\necho ''\necho '...dumping current DB'\nmysqldump -h ${host} -u ${user} -p${password} ${database} > ${gr}/`date +%Y-%m-%d`_phoenix.sql\necho ''\n\nexit" }, { "alpha_fraction": 0.5348659157752991, "alphanum_fraction": 0.5363984704017639, "avg_line_length": 10.34782600402832, "blob_id": "834a5b9cb49d1f3e1fa7c9cbb6af81eb6be22d84", "content_id": "aedf5351759dc548a60db6072a16a87dbf7d9049", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1305, "license_type": "no_license", "max_line_length": 54, "num_lines": 115, "path": "/archives/environments/vag_wordpress.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nif [[ -z \"$1\" ]]; then\n\n echo 'Which project? '\n read env\n\nelse\n env=\"$1\"\nfi\n\n# remove all slashes\nenv=`echo $env | sed 's/[/]//g'`\n\n# extract the project name\nproj=`echo $env | sed 's/_.*//'`\n\n# set vagrant\ncd ${code}/app_${env}\nsetv\n\n# set root\ncd ${code}/${env}\nsetr\n\ncd ${gr}/site\n\n# set plugins\nif [[ -d 'wp-content' || -d 'content' ]]; then\n\n cd ${gr}/site/*content*/plugins\n setp\n\nelif [ -d 'assets' ]; then\n\n cd ${gr}/site/*assets*/plugins\n setp\n\nfi\n\ncd ../themes\nsett\n\n# set parent theme\nif [ -d 'skeleton' ]; then\n\n cd skeleton\n sethh\n cd ..\n\nelif [ -d '*skeleton*' ]; then\n\n cd *skeleton*\n sethh\n cd ..\n\nfi\n\nfound_home=false\n\n# set home\nif [ -d $env ]; then\n\n found_home=true\n cd $env\n seth\n\nelif [ -d $proj ]; then\n\n found_home=true\n cd $proj\n seth\n\nelse\n echo 'Could not locate content directory'\nfi\n\n# set style\nif $found_home; then\n\n cd _\n\n if [ -d 'scss' ]; then\n\n cd scss\n sets\n\n elif [ -d 'sass' ]; then\n\n cd sass\n sets\n\n fi\n\nfi\n\n# set assets\nif [ -d \"/Users/steele/projects/assets/${env}\" ]; then\n\n cd /Users/steele/projects/assets/${env}\n year=`date \"+%Y\"`\n\n if [ -d $year ]; then\n cd $year\n fi\n\n seta\n\nfi\n\nif $found_home; then\n gh\nelse\n gt\nfi\n" }, { "alpha_fraction": 0.40979689359664917, "alphanum_fraction": 0.4121863842010498, "avg_line_length": 19.924999237060547, "blob_id": "98a8a07aed2c5baa19c5d23b8e14781fb9626323", "content_id": "661d3146bdba18f99fa405e27556b69a89f1e5f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 837, "license_type": "no_license", "max_line_length": 76, "num_lines": 40, "path": "/www.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports='ss'\nbash_aliases=''\n\nif [[ ! -z $bash_exports ]] || [[ ! 
-z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# OPEN LISTED URLS IN A BROWSER\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# Determine project\nif [[ -n \"$1\" ]]; then\n url=\"$1\"\nelse\n echo -n \"URL file: \"\n read url\nfi\n\n# Build the path to the url flat file\npath=\"${ss}/urls/${url}\"\n\n# Filter out commented out URLs\n${xec}/www.pl ${path}\n\n# Open links\n/usr/bin/open -a /Applications/Google\\ Chrome.app/ `/bin/cat ~/url_temp.txt`\n\n# Clean up\nrm -fr ~/url_temp.txt\n\nexit\n" }, { "alpha_fraction": 0.4038461446762085, "alphanum_fraction": 0.7115384340286255, "avg_line_length": 9.399999618530273, "blob_id": "e2835b428d143f5b2ce2a461592909bf0e1b2393", "content_id": "790f4998136c2827db8b42990f2dd2490923b94e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 52, "license_type": "no_license", "max_line_length": 32, "num_lines": 5, "path": "/projects/bs/get-credit-card-number.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho '4111111111111111' | pbcopy\n\nexit\n" }, { "alpha_fraction": 0.6925305724143982, "alphanum_fraction": 0.6957501769065857, "avg_line_length": 27.236364364624023, "blob_id": "f8d11091f256b851016875ee117fc7f230a65d7e", "content_id": "8f2433ff0400087b7d918fac8d5f7bd0f617187f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3106, "license_type": "no_license", "max_line_length": 118, "num_lines": 110, "path": "/documentation/macvim.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "$xec/open-macvim-tab.bash\nopen -a MacVim\nmvim\nmvim --remote-tab-silent\nmvim -v # works in terminal\n\n\n# v/vv: initialize macvim; open new window w/ tabs\n\n# fails: a bunch of errors\n$xec/open-macvim-tab.bash $xec/highlight-all.csh $xec/highlight-all.pl $xec/highlight-wp.csh\n\n# works\nopen -a MacVim $xec/highlight-all.csh $xec/highlight-all.pl $xec/highlight-wp.csh\n\n# fails: ...3 files to edit\nmvim $xec/highlight-all.csh $xec/highlight-all.pl $xec/highlight-wp.csh\n\n# works\nmvim --remote-tab-silent $xec/highlight-all.csh $xec/highlight-all.pl $xec/highlight-wp.csh\n\n\n\n# v: open new tab on the open window\n\n# works\n$xec/open-macvim-tab.bash ~/Desktop/vimrc.orig\n\n# fails: opens new window\nopen -a MacVim ~/Desktop/vimrc.orig\n\n# fails: opens new window\nmvim ~/Desktop/vimrc.orig\n\n# works\nmvim --remote-tab-silent ~/Desktop/vimrc.orig\n\n\n\n# vv: existing macvim; open new window w/ tabs\n\n# fails: a bunch of errors\n$xec/open-macvim-tab.bash $xec/create_password_full.pl $xec/create_password_string.pl $xec/create_password_abridged.pl\n\n# works\nopen -a MacVim $xec/create_password_full.pl $xec/create_password_string.pl $xec/create_password_abridged.pl\n\n# fails: ...3 files to edit\nmvim $xec/create_password_full.pl $xec/create_password_string.pl $xec/create_password_abridged.pl\n\n# fails: opens in same window as original\nmvim --remote-tab-silent $xec/create_password_full.pl $xec/create_password_string.pl $xec/create_password_abridged.pl\n\n\n\n# v: open new tab on 2nd open window\n\n# fails: opens new tab in 1st window\n$xec/open-macvim-tab.bash ~/Desktop/vimrc.orig\n\n# works\n$xec/open-macvim-tab.bash ~/Desktop/vimrc.orig 1\n\n# fails: opens new window\nopen -a MacVim ~/Desktop/vimrc.orig\n\n# fails: opens new window\nmvim ~/Desktop/vimrc.orig\n\n# fails: opens 
new tab in 1st window\nmvim --remote-tab-silent ~/Desktop/vimrc.orig\n\n\n\n# `````````````````````````````````````````````````````````````````\n\n\n\n# v/vv: initialize macvim; open new window w/ tabs\nopen -a MacVim $xec/highlight-all.csh $xec/highlight-all.pl $xec/highlight-wp.csh\nmvim --remote-tab-silent $xec/highlight-all.csh $xec/highlight-all.pl $xec/highlight-wp.csh\n\n# v: open new tab on the open window\n$xec/open-macvim-tab.bash ~/Desktop/vimrc.orig\nmvim --remote-tab-silent ~/Desktop/vimrc.orig\n\n# vv: existing macvim; open new window w/ tabs\nopen -a MacVim $xec/create_password_full.pl $xec/create_password_string.pl $xec/create_password_abridged.pl\n\n# v: open new tab on 2nd open window\n$xec/open-macvim-tab.bash ~/Desktop/vimrc.orig 1\n\n\n\n# `````````````````````````````````````````````````````````````````\n\n\n\n# v/vv: initialize macvim; open new window w/ tabs\nvv $xec/highlight-all.csh $xec/highlight-all.pl $xec/highlight-wp.csh\n# mvim --remote-tab-silent $xec/highlight-all.csh $xec/highlight-all.pl $xec/highlight-wp.csh\n\n# v: open new tab on the open window\nv ~/Desktop/vimrc.orig\n\n# vv: existing macvim; open new window w/ tabs\nvv $xec/create_password_full.pl $xec/create_password_string.pl $xec/create_password_abridged.pl\n\n# v: open new tab on 2nd open window\nv ~/Desktop/vimrc.orig 1\n" }, { "alpha_fraction": 0.36991870403289795, "alphanum_fraction": 0.37262871861457825, "avg_line_length": 22.0625, "blob_id": "c1f99e53b6ce267c9c910ea05842849f7cb923c5", "content_id": "2e81121631481e5c062b18f659d85c7b9d728e17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 738, "license_type": "no_license", "max_line_length": 67, "num_lines": 32, "path": "/search-concat-results.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports=''\nbash_aliases='search'\n\nif [[ ! -z $bash_exports ]] || [[ ! 
-z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# JOIN SEARCH RESULTS LIST (ONE-PER-LINE) ONTO A SINGLE LINE\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nif [[ -n \"$1\" ]]; then\n searchTerm=\"$1\"\nelse\n echo ''\n echo \"Please feed me lines of input\"\n echo ''\n exit\nfi\n\noutput=`search --nocolor ${searchTerm} | tr '\\n' ' '`\n\necho ${output}\necho ${output} | pbcopy\n" }, { "alpha_fraction": 0.5194401144981384, "alphanum_fraction": 0.5334370136260986, "avg_line_length": 17.371429443359375, "blob_id": "e48da62a00fc7216dd97df810ef0bcef0ed7e3d0", "content_id": "6ca48f51ac451745df1c40dda1d3051072d79ea4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 643, "license_type": "no_license", "max_line_length": 60, "num_lines": 35, "path": "/verify-bash-variables.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsource ~/.bashrc\n\n\n# return 1 if passed in strings are exported variables\n# ...0 otherwise\nfunction verify_bash_exports {\n valid=1\n\n for var in \"$@\"; do\n if [[ -z \"${!var+x}\" ]]; then\n echo \" \\$${var} has not been exported\"\n valid=0\n fi\n done\n\n return ${valid}\n}\n\n\n# return 1 if passed in strings are properly aliased\n# ...0 otherwise\nfunction verify_bash_aliases {\n valid=1\n\n for var in \"$@\"; do\n if [[ ! `alias ${var} 2>/dev/null` ]]; then\n echo \" '${var}' has not been properly aliased\"\n valid=0\n fi\n done\n\n return ${valid}\n}\n" }, { "alpha_fraction": 0.5860373377799988, "alphanum_fraction": 0.588003933429718, "avg_line_length": 22.65116310119629, "blob_id": "1823354e90a087dab078cf5472d0a704d7cc4296", "content_id": "972e5337873c28cbf5b6c7c8ecb6ac0bef97abdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1017, "license_type": "no_license", "max_line_length": 106, "num_lines": 43, "path": "/alphabetize-list.py", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# todo: use command-line tool instead\n# sort unsorted.txt > ~/.temp # sort: many sorting options available\n# uniq sorted.txt ~/.temp # dedupe only\n\nimport sys, subprocess\n\nif len(sys.argv) >= 2:\n\n # assign from command line arg\n fi_name = sys.argv[1]\n\nelse:\n\n # assign from prompt\n print('Path to file: ')\n fi_name = sys.stdin.readline().rstrip()\n\n# get absolute file path\npath = subprocess.check_output('pwd', shell=True).rstrip()\nfi_path = path + '/' + fi_name\n\n# open 'in' file, read, and sort\nlines = [line.rstrip() for line in open(fi_path)]\nlines.sort()\n\n# open 'out' file\npath = subprocess.check_output('cd ~; pwd', shell=True).rstrip()\nfile_path = path + '/.temp'\nfo = open(file_path, 'w')\n\n# write out\nprev_line = ''\nfor i in lines:\n\n # remove duplicate entries\n if i != prev_line:\n fo.writelines([i, \"\\n\"])\n\n prev_line = i\n\nprint('\\n Alphabetized and written to ~/.temp\\n')\n" }, { "alpha_fraction": 0.7552447319030762, "alphanum_fraction": 0.7552447319030762, "avg_line_length": 22.83333396911621, "blob_id": "f1099e1a9c7194c523c89753b7f0913bbc685129", "content_id": "78d64c9f9cf050bce1b8b2f1b7e8f327b10570e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 143, "license_type": "no_license", "max_line_length": 41, "num_lines": 6, "path": "/new-term.command", "repo_name": 
"ssteele/xecutables", "src_encoding": "UTF-8", "text": "osascript -e 'tell application \"Terminal\"\n\tactivate\n\ttell application \"System Events\"\n\t\tkeystroke \"n\" using {command down}\n\tend tell\nend tell'\n" }, { "alpha_fraction": 0.5577957034111023, "alphanum_fraction": 0.5920698642730713, "avg_line_length": 15.17391300201416, "blob_id": "62ae8995f074cf27ca86bbab1d2e8556008afc11", "content_id": "2064123b700b5ece61e11012544cc39b513fd6cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1488, "license_type": "no_license", "max_line_length": 131, "num_lines": 92, "path": "/create-color-strips.py", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\n\n# Handle input\nif (len(sys.argv) > 1):\n in_file = str(sys.argv[1])\nelse:\n in_file = raw_input('\\n Enter color definition filename: ')\n print('')\n\ntry:\n f_in = open(in_file)\nexcept:\n print(' File not found\\n')\n exit()\n\ncolors = f_in.read()\ncolorboxes = ''\n\n# Convert color string into array\ncolors = colors.split('\\n')\n\n# Remove blank (first and last) elements\ncolors.pop(0)\ncolors.pop(-1)\n\nfor c in colors:\n\n entry = c.split(':')\n\n # Strip '$' from name\n name = entry[0][1:]\n\n # Strip ';' from code\n code = entry[1][:-1]\n\n colorboxes += '<div class=\"colorbox\" style=\"background-color:' + code + '\"><span>' + name + '<br />' + code + '</span></div>\\n'\n\n# HTML markup\no = \"\"\"<!doctype html>\n<html lang=\"en\">\n<head>\n<title>Color Reference</title>\n<meta charset=\"utf-8\">\n<style type=\"text/css\">\n\nbody {\n background:#fff url('img/white_carbon.png');\n}\n\n.colorbox {\n position:relative;\n width:90%;\n height:100px;\n margin:10px auto;\n border-radius:3px;\n}\n\n.colorbox span {\n position:absolute;\n right:0;\n width:125px;\n padding:48px 10px 10px 0;\n text-align:right;\n line-height:20px;\n font-family:\"Trebuchet MS\";\n font-size:16px;\n color:#333;\n background-color:rgba(255, 255, 255, 0.8);\n border:1px solid #ddd;\n border-radius:0 3px 3px 0;\n}\n\n</style>\n\n</head>\n<body>\n\n\n\"\"\"\n\no += colorboxes\n\no += \"\"\"\n\n</body>\n</html>\n\"\"\"\n\nf_out = open('colors.html', 'w')\nf_out.write(o)\n" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 8, "blob_id": "23ee26a0d659d0986c6b8ea7492e50859e8acca0", "content_id": "269638d91b2ad97b7fe9a9ad013069ae694838e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 45, "license_type": "no_license", "max_line_length": 25, "num_lines": 5, "path": "/projects/fg/get-ssn.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho '666620432' | pbcopy\n\nexit\n" }, { "alpha_fraction": 0.5776081681251526, "alphanum_fraction": 0.5814249515533447, "avg_line_length": 13.422018051147461, "blob_id": "46f5f01d9c5705b391f02ec81bfa762a7a73e21e", "content_id": "0e78c355c5718ca1e8755c7dd1bfaa3886a37fe9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1572, "license_type": "no_license", "max_line_length": 69, "num_lines": 109, "path": "/environments/archives/fg/phoenix.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nsource 
${xec}/verify-bash-variables.bash\n\n# validate all variables\nverify_bash_exports phoenix_path phoenix_assets_path work dt\nbash_exports_valid=$?\n\n# validate all aliases\nverify_bash_aliases\nbash_aliases_valid=$?\n\nif [[ 0 = ${bash_exports_valid} || 0 = ${bash_aliases_valid} ]]; then\n return\nfi\n\nshopt -s expand_aliases\nsource ~/.bashrc\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# SET FG PHOENIX ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# custom site\nenv=${phoenix_path}\n\n# set root\ncd ${env}\nsetr\n\n# set FG\ncd ${gr}/application/libraries/Fg\nsetf\n\n# set config\ncd ${gr}/application/config\nsetrr\n\n# set controllers\ncd ${gr}/application/controllers\nsetc\n\n# set entities\ncd ${gf}/Entity\nsete\n\n# set models\ncd ${gf}/Model\nsetm\n\n# set viewmodels\ncd ${gf}/ViewModel\nsetvv\n\n# set views\ncd ${gr}/application/views\nsetv\n\n# set services\ncd ${gf}/Service\nsets\n\n# # set attributes\n# cd ${gf}/Ade/Attribute\n# seta\n\n# set js\ncd ${gr}/assets/js/fg\nsetj\n\n# set tests\ncd ${gr}/application/tests\nsett\n\n# set proxies\ncd ${gr}/application/models/Proxies\nsetp\n\n# set db alters\ncd ${gr}/db/alters\nsetd\n\n# set logs\ncd ${gr}/application/logs\nsetl\n\n# set assets\ncd ${phoenix_assets_path}/2017\nsetA\n\n# set documentation\ncd ${work}/documentation/phoenix_beta\nsetD\n\n# set docker\ncd ~/dev_docker\nsetV\n\n# set current\ncd ${dt}/desk/current\nsetC\n\ngr\n" }, { "alpha_fraction": 0.39855071902275085, "alphanum_fraction": 0.4021739065647125, "avg_line_length": 22.657142639160156, "blob_id": "f51cfb253df67351d3af6df75b482d7aaf33fc40", "content_id": "ae9508e32c2e3ae23b86a89e9592f698f884ffa8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 828, "license_type": "no_license", "max_line_length": 67, "num_lines": 35, "path": "/git-branch-copy.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports=''\nbash_aliases=''\n\nif [[ ! -z $bash_exports ]] || [[ ! 
-z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# GIT BRANCH COPY\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nif [[ -n \"$1\" ]]; then\n uniqueName=\"$1\"\nelse\n echo -n \"Unique name: \"\n read uniqueName\nfi\n\ncurrentBranch=\"`git branch | grep \\* | cut -d ' ' -f2`\"\nnewBranch=\"!${currentBranch}-${uniqueName}\"\n\nif [[ $(git rev-parse --verify --quiet \"${newBranch}\") ]]; then\n git branch -D ${newBranch}\nfi\n\ngit branch ${newBranch}\necho \"Created branch ${newBranch}\"\n" }, { "alpha_fraction": 0.2787524461746216, "alphanum_fraction": 0.2787524461746216, "avg_line_length": 22.272727966308594, "blob_id": "82e13c8ee355dd10dcb0bc63c3dcafa100d43ce7", "content_id": "069f1803c2af1b413767dcfb2d1516f39b6626a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 513, "license_type": "no_license", "max_line_length": 67, "num_lines": 22, "path": "/git-init.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports=''\nbash_aliases=''\n\nif [[ ! -z $bash_exports ]] || [[ ! -z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# GIT INIT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ngit init\ngit add .\ngit commit -m 'Initial commit'\n\n" }, { "alpha_fraction": 0.8484848737716675, "alphanum_fraction": 0.8484848737716675, "avg_line_length": 66, "blob_id": "71181fd5e037c713717156d7677a346e60622166", "content_id": "461de9dac13c98b8ce01c58fafc1967e15d0828f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 66, "license_type": "no_license", "max_line_length": 66, "num_lines": 1, "path": "/dot-env/.bashrc", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "/Users/stevensteele/ssteele/_archive/bash/config/li-macbook.bashrc" }, { "alpha_fraction": 0.32376396656036377, "alphanum_fraction": 0.3285486400127411, "avg_line_length": 19.899999618530273, "blob_id": "571291990aae9ce76915b49ed0420ee4cdb6a60a", "content_id": "d5c88d4f6cc3ee6075ac9d07073a7e9df71900fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 627, "license_type": "no_license", "max_line_length": 67, "num_lines": 30, "path": "/create-vim-task.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports=''\nbash_aliases='vv'\n\nif [[ ! -z $bash_exports ]] || [[ ! 
-z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# CREATE VIM TASK\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# get task id\nif [[ -n \"$1\" ]]; then\n task_id=\"$1\"\nelse\n echo 'No task ID passed in by create_task.bash'\n exit\nfi\n\nvv ${task_id}/0_notes.txt\n\nexit\n" }, { "alpha_fraction": 0.4867830276489258, "alphanum_fraction": 0.50922691822052, "avg_line_length": 26.84722137451172, "blob_id": "56bb78fb66d5a29e8311c4c8efc46263d84b8f7f", "content_id": "968ee552088af8d66764d8221be97455b04706de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2005, "license_type": "no_license", "max_line_length": 115, "num_lines": 72, "path": "/filter-sql-subdomain.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports=''\nbash_aliases=''\n\nif [[ ! -z $bash_exports ]] || [[ ! -z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# UPDATE URL PREPEND IN A DB DUMP\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# # debug\n# site='harvillesteele.com'\n# sql_file='2018-06-09_harville_recipes_prod_wp.sql'\n# old_sql_prepend='www'\n# new_sql_prepend='shs'\n# old_local_port=''\n# new_local_port='8888'\n\necho ''\nif [[ -n \"$1\" ]]; then\n sql_file=\"$1\"\n echo \" sql file (eg: valero_2012-08-30.sql) => ${sql_file}\"\nelse\n echo -n ' sql file (eg: valero_2012-08-30.sql) => '\n read sql_file\nfi\necho -n ' site (eg: valerotexasopen.org) => '\nread site\necho -n ' old db prepend (eg: www) => '\nread old_sql_prepend\necho -n ' new db prepend (eg: loc) => '\nread new_sql_prepend\necho -n ' old local port (eg: 8888) => '\nread old_local_port\necho -n ' new local port (eg: 8888) => '\nread new_local_port\n\nignore_locale=true\nif $ignore_locale; then\n export LC_CTYPE=C\n export LANG=C\nfi\n\n${sed} sed s,${old_sql_prepend}.${site},${site},g ${sql_file} > ${sql_file}.temp1\n\nif [[ -n $old_local_port ]]; then\n sed s,${site}:${old_local_port},${site},g ${sql_file}.temp1 > ${sql_file}.temp2\nelse\n cp ${sql_file}.temp1 ${sql_file}.temp2\nfi\n\nif [[ -n $new_local_port ]]; then\n sed s,${site},${new_sql_prepend}.${site}:${new_local_port},g ${sql_file}.temp2 > ${new_sql_prepend}_${sql_file}\nelse\n sed s,${site},${new_sql_prepend}.${site},g ${sql_file}.temp2 > ${new_sql_prepend}_${sql_file}\nfi\n\nrm -fr ${sql_file}.temp*\n\necho ''\necho 'Now get in there and do something like:'\necho \"UPDATE wp_users SET user_email = '[email protected]' WHERE user_email != '[email protected]';\"\necho ''\n" }, { "alpha_fraction": 0.4968354403972626, "alphanum_fraction": 0.503164529800415, "avg_line_length": 16.88679313659668, "blob_id": "76ea917b9be3dbca7a145dec498506db34ac6dfa", "content_id": "472ae1d8ed47ee56d694d5ba88f7ef427d36408c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 948, "license_type": "no_license", "max_line_length": 69, "num_lines": 53, "path": "/environments/archives/fg/gateway.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nsource ${xec}/verify-bash-variables.bash\n\n# validate all variables\nverify_bash_exports gateway_path phoenix_assets_path work\nbash_exports_valid=$?\n\n# validate all aliases\nverify_bash_aliases\nbash_aliases_valid=$?\n\nif [[ 0 = ${bash_exports_valid} || 0 = ${bash_aliases_valid} ]]; then\n return\nfi\n\nshopt -s expand_aliases\nsource ~/.bashrc\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# SET FG INDIRECT (MARKET) GATEWAY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# custom site\nenv=${gateway_path}\n\n# set root\ncd ${env}\nsetr\n\n# set tests\ncd ${gr}/tests\nsett\n\n# set assets\ncd ${phoenix_assets_path}/2017\nsetA\n\n# set documentation\ncd ${work}/documentation/gateway\nsetD\n\n# set docker\ncd ~/dev_docker\nsetV\n\ngr\n" }, { "alpha_fraction": 0.27566540241241455, "alphanum_fraction": 0.2775665521621704, "avg_line_length": 24, "blob_id": "8f7e86b0364484a766997f83683d0f2312514803", "content_id": "271e297a921b9f30b6f8c57703669dd9658c014f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 526, "license_type": "no_license", "max_line_length": 67, "num_lines": 21, "path": "/phpurl.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports=''\nbash_aliases=''\n\nif [[ ! -z $bash_exports ]] || [[ ! -z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# OPEN ht SCRIPT IN A BROWSER\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nfile=\"$1\"\n${xec}/phpurl.pl `pwd -P` ${file}\n\n" }, { "alpha_fraction": 0.47863924503326416, "alphanum_fraction": 0.4833860695362091, "avg_line_length": 18.461538314819336, "blob_id": "76cc6fd29b4f29c7e1c55022842b274b438e5ab1", "content_id": "d993b4f1e4606150477255ab67d36beb5c5905be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1264, "license_type": "no_license", "max_line_length": 84, "num_lines": 65, "path": "/projects/bs/bs-core-db-load.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nsource ${xec}/verify-bash-variables.bash\n\n# validate all variables\nverify_bash_exports gR\nbash_exports_valid=$?\n\n# validate all aliases\nverify_bash_aliases\nbash_aliases_valid=$?\n\nif [[ 0 = ${bash_exports_valid} || 0 = ${bash_aliases_valid} ]]; then\n exit\nfi\n\nshopt -s expand_aliases\nsource ~/.bashrc\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# LOAD BS DB\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nuser='root'\npassword='password'\ndatabase='bitesquad'\n\ngR\n\nif [[ -z \"$1\" ]]; then\n echo 'Please supply the SQL input file as an argument'\n exit\nfi\ninput=\"$1\"\n\ndoDump=true\nif [[ -n \"$2\" ]]; then\n if [[ 'false' = $2 ]]; then\n doDump=false\n fi\nfi\n\necho ''\n\nif $doDump; then\n echo '...dumping current DB'\n mysqldump -u${user} -p${password} ${database} > `date +%Y-%m-%d`_${database}.sql\n gzip `date 
+%Y-%m-%d`_${database}.sql\nfi\n\necho \"...loading $input\"\nsql_file=${input%.gz}\nif [ \"$sql_file\" != \"$input\" ]; then\n gunzip $input\nfi\nmysql -u${user} -p${password} -D${database} < $sql_file\necho ''\n\nexit" }, { "alpha_fraction": 0.32159265875816345, "alphanum_fraction": 0.3246554434299469, "avg_line_length": 18.205883026123047, "blob_id": "ee575ded2a69950a1a7dd08b381e72603255de60", "content_id": "0262ef0fb1076b03f9e7bbedea15f0d9b1130eb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 653, "license_type": "no_license", "max_line_length": 67, "num_lines": 34, "path": "/which-shs.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports='ss'\nbash_aliases=''\n\nif [[ ! -z $bash_exports ]] || [[ ! -z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# CUSTOM WHICH COMMAND\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nif [[ -n \"$1\" ]]; then\n input=\"$1\"\nelse\n echo -n \"Command: \"\n read input\nfi\n\necho ''\n\ngrep ${input} ~/.zshrc\ngrep -r ${input} ${ss}/zsh\necho ''\n\nalias ${input}\necho ''\n" }, { "alpha_fraction": 0.4420783519744873, "alphanum_fraction": 0.4463373124599457, "avg_line_length": 22.019607543945312, "blob_id": "b211e3681a3f6361f16b9f8d9e173065eed31ca5", "content_id": "9661484056d6d310cd3d839910ef2e53e4ca9fe7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1174, "license_type": "no_license", "max_line_length": 74, "num_lines": 51, "path": "/git-apply-patch.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports='gPP'\nbash_aliases=''\n\nif [[ ! -z $bash_exports ]] || [[ ! 
-z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# GIT BRANCH COPY\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nif [[ -z \"$1\" ]]; then\n echo ''\n echo 'Please supply a search term to match an available patch below:'\n echo ''\n ls ${gPP}/*\n echo ''\n exit\nfi\n\nparentDirectory=\"${gPP}/\"\npatchDirectory=\"\"\npatchName=\"${1}\"\nif [[ -n \"${2}\" ]]; then\n patchDirectory=\"${1}/\"\n patchName=\"${2}\"\nfi\n\nIFS=$'\\n'\nfor patch in $( ls ${parentDirectory}${patchDirectory} ); do\n if [[ ${patch} =~ \"${patchName}\" ]]; then\n patchToApply=\"${patch}\"\n break\n fi\ndone\n\nif [ -n \"${patchToApply}\" ]; then\n git apply < ${parentDirectory}${patchDirectory}${patchToApply}\n echo -n 'Patch applied: '; echo \\$gPP/${patchDirectory}${patchToApply}\n git status\nelse\n echo 'Patch not found'\nfi\n" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 8, "blob_id": "35dae7761d7aba547d94b19ce041a52e50bddb91", "content_id": "adec7b320ae32029545c570583c17567bb1d9e9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 45, "license_type": "no_license", "max_line_length": 25, "num_lines": 5, "path": "/projects/fg/get-ssn-rg.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho '666100755' | pbcopy\n\nexit\n" }, { "alpha_fraction": 0.3801169693470001, "alphanum_fraction": 0.38479530811309814, "avg_line_length": 22.08108139038086, "blob_id": "f0616a4eb2890fb81c85f09a2c266e655fc1f549", "content_id": "3acccd934b917d6ef5540677eef79a9c7795fd9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 855, "license_type": "no_license", "max_line_length": 196, "num_lines": 37, "path": "/vox.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports=''\nbash_aliases=''\n\nif [[ ! -z $bash_exports ]] || [[ ! 
-z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# FUN TIME COMPUTER VOICE\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nif [[ -z \"$1\" ]]; then\n\n echo ''\n echo 'USAGE::'\n echo ''\n echo ' vox \"$msg\" $voice [agnes, kathy, princess, vicki, victoria, bruce, fred, junior, ralph, albert, bahh, bells, boing, bubbles, cellos, deranged, hysterical, trinoids, whisper, zarvox]'\n echo ''\n exit\n\nelse\n msg=\"$1\"\nfi\n\nif [[ -n \"$2\" ]]; then\n voice=\"$2\"\nfi\n\nsay -v \"${voice}\" \"${msg}\"\n\n" }, { "alpha_fraction": 0.519568920135498, "alphanum_fraction": 0.5218377709388733, "avg_line_length": 13.450819969177246, "blob_id": "24d6869f8c7f7932221af04faa2c0dbcefbc0d70", "content_id": "20569baec501fff77c9c5d5f1592d2774f7f4c42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1763, "license_type": "no_license", "max_line_length": 69, "num_lines": 122, "path": "/environments/laravel5.3.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nsource ${xec}/verify-bash-variables.bash\n\n# validate all variables\nverify_bash_exports home\nbash_exports_valid=$?\n\n# validate all aliases\nverify_bash_aliases\nbash_aliases_valid=$?\n\nif [[ 0 = ${bash_exports_valid} || 0 = ${bash_aliases_valid} ]]; then\n return\nfi\n\nshopt -s expand_aliases\nsource ~/.bashrc\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# SET LARAVEL ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nif [[ -z \"$1\" ]]; then\n\n echo 'Which project? 
'\n read env\n\nelse\n env=\"$1\"\nfi\n\n# remove all slashes\nenv=`echo ${env} | sed 's/[/]//g'`\n\n# extract the project name\nproj=`echo ${env} | sed 's/_.*//'`\n\n# set root\ncd ${env}\nsetR\n\n# set routes\ncd ${gR}/routes\nsetO\nseto\n\n# set db alters\ncd ${gR}/database/migrations\nsetD\n\n# set public\ncd ${gR}/public\nsetP\n\n# set compiled js\ncd ${gP}/js\nsetJ\n\n# set compiled style\ncd ${gP}/css\nsetX\n\n# set tests\ncd ${gR}/tests\nsetT\n\n# set laravel source\ncd ${gR}/vendor/laravel/framework/src/Illuminate\nsetS\n\n# set assets\nif [ -d \"${home}/assets/${env}\" ]; then\n cd ${home}/assets/${env}\n year=`date \"+%Y\"`\n\n if [ -d $year ]; then\n cd ${year}\n fi\n\n setAA\nfi\n\n# set controllers\ncd ${gm}/Http/Controllers\nsetc\n\n# set models\ncd ${gR}/app\nsetm\n\n# set views\ncd ${gR}/resources/views\nsetv\n\n# set js\ncd ${gR}/resources/assets/js\nsetj\n\n# set style\ncd ${gR}/resources/assets\nif [ -d 'sass' ]; then\n cd sass\n setx\nelif [ -d 'scss' ]; then\n cd scss\n setx\nelif [ -d 'less' ]; then\n cd less\n setx\nelif [ -d 'css' ]; then\n cd css\n setx\nfi\n\ngR\n" }, { "alpha_fraction": 0.4054054021835327, "alphanum_fraction": 0.4054054021835327, "avg_line_length": 20.705883026123047, "blob_id": "2c24299d4f800a43f8f99974719ff76848dd237e", "content_id": "42174dbf7e85620bacc87f19e369e8965bf4900c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 370, "license_type": "no_license", "max_line_length": 67, "num_lines": 17, "path": "/create-php-ctags-cscope.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# CREATE PHP CTAGS AND CSCOPE\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# remove existing\nrm -fr .cscope.*\nrm -fr .tags\n\n# create tags\nctags -f .tags -R .\n\n# create cscope\nfind `pwd` -name '*.php' > .cscope.files\ncscope -ub -i .cscope.files -f .cscope.out\n\n" }, { "alpha_fraction": 0.49694502353668213, "alphanum_fraction": 0.5010183453559875, "avg_line_length": 22.380952835083008, "blob_id": "a4e5bd33775293df2fd397d1889ead232776d116", "content_id": "f9c2c75e8c0f63235f0a70ec51050e1b8d7c8a85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 491, "license_type": "no_license", "max_line_length": 67, "num_lines": 21, "path": "/_bootstrap.zsh", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/zsh\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# BOOTSTRAP/VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nsource ${xec}/verify-zsh-variables.zsh\n\n# validate all variables\nverify_zsh_exports $zsh_exports\nzsh_exports_valid=$?\n\n# validate all aliases\nverify_zsh_aliases $zsh_aliases\nzsh_aliases_valid=$?\n\nif [[ 1 = ${zsh_exports_valid} && 1 = ${zsh_aliases_valid} ]]; then\n setopt aliases\n source ~/.zshrc\nfi\n" }, { "alpha_fraction": 0.32576984167099, "alphanum_fraction": 0.32901135087013245, "avg_line_length": 20.275861740112305, "blob_id": "ea9c3899f52762b6d9bcde153536b6ab2b1919f4", "content_id": "965c0fc49dedbfba92bdf586379ec05735a8d545", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 617, "license_type": "no_license", "max_line_length": 67, "num_lines": 29, "path": "/archives/envs-diff.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", 
"text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports=''\nbash_aliases='envsCopy'\n\nif [[ ! -z $bash_exports ]] || [[ ! -z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# COPY SHELL ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nenvFile=''\nif [[ -z \"$1\" ]]; then\n envFile='_env.bash'\nelse\n envFile=\"$1\"\nfi\n\nenvsCopy > .tmp\ndiff ${envFile} .tmp\nrm -fr .tmp\n" }, { "alpha_fraction": 0.4016686677932739, "alphanum_fraction": 0.4040524363517761, "avg_line_length": 22.30555534362793, "blob_id": "fc56304944c930a400a514a0f7a4adec0b658ba4", "content_id": "c9dc41f4bcd88a39887c8ce7ddccb74f7cf67d24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 839, "license_type": "no_license", "max_line_length": 67, "num_lines": 36, "path": "/create-task.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports=''\nbash_aliases='create_vim_task create_sublime_task'\n\nif [[ ! -z $bash_exports ]] || [[ ! -z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# CREATE TASK\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# get task id\nif [[ -n \"$1\" ]]; then\n task_id=\"$1\"\nelse\n echo -n \"Task ID: \"\n read task_id\nfi\n\nif echo \"$EDITOR\" | grep -q 'vim'; then\n create_vim_task ${task_id}\nelif echo \"$EDITOR\" | grep -q 'subl'; then\n create_sublime_task ${task_id}\nelif echo \"$EDITOR\" | grep -q 'code'; then\n create_vscode_task ${task_id}\nfi\n\nexit\n" }, { "alpha_fraction": 0.37644585967063904, "alphanum_fraction": 0.38065195083618164, "avg_line_length": 20.613636016845703, "blob_id": "983f7ebfb1765604ceb24b99472a71fcced48779", "content_id": "6bd65761afea7256e363226b2da42609701da33b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 951, "license_type": "no_license", "max_line_length": 67, "num_lines": 44, "path": "/archives/ack-concat-results.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports=''\nbash_aliases=''\n\nif [[ ! -z $bash_exports ]] || [[ ! 
-z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# JOIN ACK RESULTS LIST (ONE-PER-LINE) ONTO A SINGLE LINE\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nignoreCase=false\n\nif [[ -n \"$1\" ]]; then\n while [ -n \"$1\" ]; do\n case \"$1\" in\n -i) ignoreCase=true ;;\n *) searchTerm=\"$1\" ;;\n esac\n shift\n done\nelse\n echo ''\n echo \"Please feed me lines of input\"\n echo ''\n exit\nfi\n\nif $ignoreCase; then\n output=`ack -li ${searchTerm} | tr '\\n' ' '`\nelse\n output=`ack -l ${searchTerm} | tr '\\n' ' '`\nfi\n\necho ${output}\necho ${output} | pbcopy\n" }, { "alpha_fraction": 0.5283018946647644, "alphanum_fraction": 0.7169811129570007, "avg_line_length": 9.600000381469727, "blob_id": "b9326d700f1436c1e331967e29b501fb3338282d", "content_id": "77cbb3cf8fe7927447fdebd6dda2517da83b3691", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 53, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/projects/fg/get-vin.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho '4S4BRBLC9B3332483' | pbcopy\n\nexit\n" }, { "alpha_fraction": 0.36771300435066223, "alphanum_fraction": 0.37107622623443604, "avg_line_length": 21.871795654296875, "blob_id": "a06e15dca51fb1f673f45575169c7ba3fd7a999f", "content_id": "83e7c7f7c53b47b22cbf1e182225e6b398d5540a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 892, "license_type": "no_license", "max_line_length": 67, "num_lines": 39, "path": "/git-apply-stash.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports=''\nbash_aliases=''\n\nif [[ ! -z $bash_exports ]] || [[ ! 
-z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# GIT BRANCH COPY\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nstashName='current-dev-base'\nif [[ -n \"$1\" ]]; then\n stashName=\"$1\"\nfi\n\nIFS=$'\\n'\nfor stash in $( git stash list ); do\n if [[ $stash =~ \"$stashName\" ]]; then\n stashIndex=`echo $stash | sed 's/^stash@{\\([^}]\\)}.*$/\\1/'`\n if [ -n \"${stashIndex}\" ]; then\n break\n fi\n fi\ndone\n\nif [ -n \"${stashIndex}\" ]; then\n git stash apply stash@{${stashIndex}}\nelse\n echo 'Stash not found'\nfi\n" }, { "alpha_fraction": 0.7661290168762207, "alphanum_fraction": 0.7661290168762207, "avg_line_length": 30.125, "blob_id": "dd5b4d49db520ec834e876b898f90b262bff9ed4", "content_id": "b474aa911f7b59e22b2eca1cf551d03a2f73b160", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 248, "license_type": "no_license", "max_line_length": 104, "num_lines": 8, "path": "/new-ftp.command", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "osascript -e 'tell application \"Terminal\"\n\tactivate\n\ttell application \"System Events\"\n\t\tkeystroke \"n\" using {command down}\n\tend tell\nend tell'\n\nosascript -e 'tell application \"Terminal\"\tto set current settings of first window to settings set \"FTP\"'" }, { "alpha_fraction": 0.7070707082748413, "alphanum_fraction": 0.7070707082748413, "avg_line_length": 15.5, "blob_id": "e95184eec1b766d71f90159114ade06833eceb9b", "content_id": "8890133473e173ff23e48eb70bce87aa5d01df01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 99, "license_type": "no_license", "max_line_length": 42, "num_lines": 6, "path": "/projects/bs/get-gift-card-codes.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ntail ${gDD}/data/gift-card-codes.txt\n${xec}/projects/bs/get-gift-card-code.bash\n\nexit\n" }, { "alpha_fraction": 0.8545454740524292, "alphanum_fraction": 0.8545454740524292, "avg_line_length": 55, "blob_id": "c8aeda0d993418a1d38b8c61afe7c033c0b12d9b", "content_id": "464b3a7122e0af8ec32d583fea40d28f517084ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 55, "license_type": "no_license", "max_line_length": 55, "num_lines": 1, "path": "/dot-env/.zshrc", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "/Users/stevensteele/ssteele/zsh/config/li-macbook.zshrc" }, { "alpha_fraction": 0.41638946533203125, "alphanum_fraction": 0.41638946533203125, "avg_line_length": 18.1156063079834, "blob_id": "db9c2083dbe802885ce8d0dba8a5c34b47e6661f", "content_id": "d1a0dc310c5a00821e697eccb08584cc9de02ec2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 6614, "license_type": "no_license", "max_line_length": 67, "num_lines": 346, "path": "/envs-copy.zsh", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/zsh\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nzsh_exports='HOME'\nzsh_aliases=''\n\nif [[ ! -z $zsh_exports ]] || [[ ! 
-z $zsh_aliases ]]; then\n source ${xec}/_bootstrap.zsh\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# COPY SHELL ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nenvs=()\n\necho ''\n\nif [ -n \"$gAA\" ]; then\n envs+=( 'cd '$gAA'; setAA' )\nfi\nif [ -n \"$gBB\" ]; then\n envs+=( 'cd '$gBB'; setBB' )\nfi\nif [ -n \"$gCC\" ]; then\n envs+=( 'cd '$gCC'; setCC' )\nfi\nif [ -n \"$gDD\" ]; then\n envs+=( 'cd '$gDD'; setDD' )\nfi\nif [ -n \"$gEE\" ]; then\n envs+=( 'cd '$gEE'; setEE' )\nfi\nif [ -n \"$gFF\" ]; then\n envs+=( 'cd '$gFF'; setFF' )\nfi\nif [ -n \"$gGG\" ]; then\n envs+=( 'cd '$gGG'; setGG' )\nfi\nif [ -n \"$gHH\" ]; then\n envs+=( 'cd '$gHH'; setHH' )\nfi\nif [ -n \"$gII\" ]; then\n envs+=( 'cd '$gII'; setII' )\nfi\nif [ -n \"$gJJ\" ]; then\n envs+=( 'cd '$gJJ'; setJJ' )\nfi\nif [ -n \"$gKK\" ]; then\n envs+=( 'cd '$gKK'; setKK' )\nfi\nif [ -n \"$gLL\" ]; then\n envs+=( 'cd '$gLL'; setLL' )\nfi\nif [ -n \"$gMM\" ]; then\n envs+=( 'cd '$gMM'; setMM' )\nfi\nif [ -n \"$gNN\" ]; then\n envs+=( 'cd '$gNN'; setNN' )\nfi\nif [ -n \"$gOO\" ]; then\n envs+=( 'cd '$gOO'; setOO' )\nfi\nif [ -n \"$gPP\" ]; then\n envs+=( 'cd '$gPP'; setPP' )\nfi\nif [ -n \"$gQQ\" ]; then\n envs+=( 'cd '$gQQ'; setQQ' )\nfi\nif [ -n \"$gRR\" ]; then\n envs+=( 'cd '$gRR'; setRR' )\nfi\nif [ -n \"$gSS\" ]; then\n envs+=( 'cd '$gSS'; setSS' )\nfi\nif [ -n \"$gTT\" ]; then\n envs+=( 'cd '$gTT'; setTT' )\nfi\nif [ -n \"$gUU\" ]; then\n envs+=( 'cd '$gUU'; setUU' )\nfi\nif [ -n \"$gVV\" ]; then\n envs+=( 'cd '$gVV'; setVV' )\nfi\nif [ -n \"$gWW\" ]; then\n envs+=( 'cd '$gWW'; setWW' )\nfi\nif [ -n \"$gXX\" ]; then\n envs+=( 'cd '$gXX'; setXX' )\nfi\nif [ -n \"$gYY\" ]; then\n envs+=( 'cd '$gYY'; setYY' )\nfi\nif [ -n \"$gZZ\" ]; then\n envs+=( 'cd '$gZZ'; setZZ' )\nfi\n\nif [ -n \"$gA\" ]; then\n envs+=( 'cd '$gA'; setA' )\nfi\nif [ -n \"$gB\" ]; then\n envs+=( 'cd '$gB'; setB' )\nfi\nif [ -n \"$gC\" ]; then\n envs+=( 'cd '$gC'; setC' )\nfi\nif [ -n \"$gD\" ]; then\n envs+=( 'cd '$gD'; setD' )\nfi\nif [ -n \"$gE\" ]; then\n envs+=( 'cd '$gE'; setE' )\nfi\nif [ -n \"$gF\" ]; then\n envs+=( 'cd '$gF'; setF' )\nfi\nif [ -n \"$gG\" ]; then\n envs+=( 'cd '$gG'; setG' )\nfi\nif [ -n \"$gH\" ]; then\n envs+=( 'cd '$gH'; setH' )\nfi\nif [ -n \"$gI\" ]; then\n envs+=( 'cd '$gI'; setI' )\nfi\nif [ -n \"$gJ\" ]; then\n envs+=( 'cd '$gJ'; setJ' )\nfi\nif [ -n \"$gK\" ]; then\n envs+=( 'cd '$gK'; setK' )\nfi\nif [ -n \"$gL\" ]; then\n envs+=( 'cd '$gL'; setL' )\nfi\nif [ -n \"$gM\" ]; then\n envs+=( 'cd '$gM'; setM' )\nfi\nif [ -n \"$gN\" ]; then\n envs+=( 'cd '$gN'; setN' )\nfi\nif [ -n \"$gO\" ]; then\n envs+=( 'cd '$gO'; setO' )\nfi\nif [ -n \"$gP\" ]; then\n envs+=( 'cd '$gP'; setP' )\nfi\nif [ -n \"$gQ\" ]; then\n envs+=( 'cd '$gQ'; setQ' )\nfi\nif [ -n \"$gR\" ]; then\n envs+=( 'cd '$gR'; setR' )\nfi\nif [ -n \"$gS\" ]; then\n envs+=( 'cd '$gS'; setS' )\nfi\nif [ -n \"$gT\" ]; then\n envs+=( 'cd '$gT'; setT' )\nfi\nif [ -n \"$gU\" ]; then\n envs+=( 'cd '$gU'; setU' )\nfi\nif [ -n \"$gV\" ]; then\n envs+=( 'cd '$gV'; setV' )\nfi\nif [ -n \"$gW\" ]; then\n envs+=( 'cd '$gW'; setW' )\nfi\nif [ -n \"$gX\" ]; then\n envs+=( 'cd '$gX'; setX' )\nfi\nif [ -n \"$gY\" ]; then\n envs+=( 'cd '$gY'; setY' )\nfi\nif [ -n \"$gZ\" ]; then\n envs+=( 'cd '$gZ'; setZ' )\nfi\n\nif [ -n \"$gaa\" ]; then\n envs+=( 'cd '$gaa'; setaa' )\nfi\nif [ -n \"$gbb\" ]; then\n envs+=( 'cd '$gbb'; setbb' )\nfi\nif [ -n \"$gcc\" ]; then\n envs+=( 'cd '$gcc'; setcc' )\nfi\nif [ -n \"$gdd\" ]; then\n envs+=( 'cd '$gdd'; setdd' )\nfi\nif [ -n \"$gee\" ]; then\n envs+=( 'cd '$gee'; setee' )\nfi\nif [ -n \"$gff\" ]; then\n envs+=( 'cd '$gff'; setff' )\nfi\nif [ -n \"$ggg\" ]; then\n envs+=( 'cd '$ggg'; setgg' )\nfi\nif [ -n \"$ghh\" ]; then\n envs+=( 'cd '$ghh'; sethh' )\nfi\nif [ -n \"$gii\" ]; then\n envs+=( 'cd '$gii'; setii' )\nfi\nif [ -n \"$gjj\" ]; then\n envs+=( 'cd '$gjj'; setjj' )\nfi\nif [ -n \"$gkk\" ]; then\n envs+=( 'cd '$gkk'; setkk' )\nfi\nif [ -n \"$gll\" ]; then\n envs+=( 'cd '$gll'; setll' )\nfi\nif [ -n \"$gmm\" ]; then\n envs+=( 'cd '$gmm'; setmm' )\nfi\nif [ -n \"$gnn\" ]; then\n envs+=( 'cd '$gnn'; setnn' )\nfi\nif [ -n \"$goo\" ]; then\n envs+=( 'cd '$goo'; setoo' )\nfi\nif [ -n \"$gpp\" ]; then\n envs+=( 'cd '$gpp'; setpp' )\nfi\nif [ -n \"$gqq\" ]; then\n envs+=( 'cd '$gqq'; setqq' )\nfi\nif [ -n \"$grr\" ]; then\n envs+=( 'cd '$grr'; setrr' )\nfi\nif [ -n \"$gss\" ]; then\n envs+=( 'cd '$gss'; setss' )\nfi\nif [ -n \"$gtt\" ]; then\n envs+=( 'cd '$gtt'; settt' )\nfi\nif [ -n \"$guu\" ]; then\n envs+=( 'cd '$guu'; setuu' )\nfi\nif [ -n \"$gvv\" ]; then\n envs+=( 'cd '$gvv'; setvv' )\nfi\nif [ -n \"$gww\" ]; then\n envs+=( 'cd '$gww'; setww' )\nfi\nif [ -n \"$gxx\" ]; then\n envs+=( 'cd '$gxx'; setxx' )\nfi\nif [ -n \"$gyy\" ]; then\n envs+=( 'cd '$gyy'; setyy' )\nfi\nif [ -n \"$gzz\" ]; then\n envs+=( 'cd '$gzz'; setzz' )\nfi\n\nif [ -n \"$ga\" ]; then\n envs+=( 'cd '$ga'; seta' )\nfi\nif [ -n \"$gb\" ]; then\n envs+=( 'cd '$gb'; setb' )\nfi\nif [ -n \"$gc\" ]; then\n envs+=( 'cd '$gc'; setc' )\nfi\nif [ -n \"$gd\" ]; then\n envs+=( 'cd '$gd'; setd' )\nfi\nif [ -n \"$ge\" ]; then\n envs+=( 'cd '$ge'; sete' )\nfi\nif [ -n \"$gf\" ]; then\n envs+=( 'cd '$gf'; setf' )\nfi\nif [ -n \"$gg\" ]; then\n envs+=( 'cd '$gg'; setg' )\nfi\nif [ -n \"$gh\" ]; then\n envs+=( 'cd '$gh'; seth' )\nfi\nif [ -n \"$gi\" ]; then\n envs+=( 'cd '$gi'; seti' )\nfi\nif [ -n \"$gj\" ]; then\n envs+=( 'cd '$gj'; setj' )\nfi\nif [ -n \"$gk\" ]; then\n envs+=( 'cd '$gk'; setk' )\nfi\nif [ -n \"$gl\" ]; then\n envs+=( 'cd '$gl'; setl' )\nfi\nif [ -n \"$gm\" ]; then\n envs+=( 'cd '$gm'; setm' )\nfi\nif [ -n \"$gn\" ]; then\n envs+=( 'cd '$gn'; setn' )\nfi\nif [ -n \"$go\" ]; then\n envs+=( 'cd '$go'; seto' )\nfi\nif [ -n \"$gp\" ]; then\n envs+=( 'cd '$gp'; setp' )\nfi\nif [ -n \"$gq\" ]; then\n envs+=( 'cd '$gq'; setq' )\nfi\nif [ -n \"$gr\" ]; then\n envs+=( 'cd '$gr'; setr' )\nfi\nif [ -n \"$gs\" ]; then\n envs+=( 'cd '$gs'; sets' )\nfi\nif [ -n \"$gt\" ]; then\n envs+=( 'cd '$gt'; sett' )\nfi\nif [ -n \"$gu\" ]; then\n envs+=( 'cd '$gu'; setu' )\nfi\nif [ -n \"$gv\" ]; then\n envs+=( 'cd '$gv'; setv' )\nfi\nif [ -n \"$gw\" ]; then\n envs+=( 'cd '$gw'; setw' )\nfi\nif [ -n \"$gx\" ]; then\n envs+=( 'cd '$gx'; setx' )\nfi\nif [ -n \"$gy\" ]; then\n envs+=( 'cd '$gy'; sety' )\nfi\nif [ -n \"$gz\" ]; then\n envs+=( 'cd '$gz'; setz' )\nfi\n\nfor env in \"${envs[@]}\"; do\n # translate absolute to home-relative paths\n echo $env | sed -e \"s|${HOME}|~|\"\ndone\n\necho \"\"\necho \"gr\"\n" }, { "alpha_fraction": 0.6766666769981384, "alphanum_fraction": 0.6833333373069763, "avg_line_length": 23.076923370361328, "blob_id": "9da46629552fa0f330dfad760fca39fbc6bfe3f0", "content_id": "5f1c8eeca2081a443d37526978ce63cd46dc9afa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 300, "license_type": "no_license", "max_line_length": 69, "num_lines": 13, "path": "/archives/current-timestamp.py", "repo_name": "ssteele/xecutables", "src_encoding": 
"UTF-8", "text": "#!/usr/bin/python\n\nimport subprocess\nimport time\n\ndef write_to_clipboard(output):\n process = subprocess.Popen(\n 'pbcopy', env={'LANG': 'en_US.UTF-8'}, stdin=subprocess.PIPE)\n process.communicate(output.encode('utf-8'))\n\nts = str(int(round(time.time())))\nprint(ts)\nwrite_to_clipboard(ts)\n" }, { "alpha_fraction": 0.3342210352420807, "alphanum_fraction": 0.33688414096832275, "avg_line_length": 21.08823585510254, "blob_id": "54442fc1050c9113f839a6a395b508975a0dd8e5", "content_id": "bdbbc162807891e86dd2baded4c713d891465f2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 751, "license_type": "no_license", "max_line_length": 67, "num_lines": 34, "path": "/envs-diff.zsh", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/zsh\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nzsh_exports=''\nzsh_aliases='envsCopy'\n\nif [[ ! -z $zsh_exports ]] || [[ ! -z $zsh_aliases ]]; then\n source ${xec}/_bootstrap.zsh\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# COPY SHELL ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nenvFile=''\nif [[ -z \"$1\" ]]; then\n envFile='_env.zsh'\n if [ ! -f ./_env.zsh ]; then\n echo ''\n echo 'mv _env.bash _env.zsh'\n echo ''\n envFile='_env.bash'\n fi\nelse\n envFile=\"$1\"\nfi\n\nenvsCopy > ${HOME}/_env\ndiff ${envFile} ${HOME}/_env\n" }, { "alpha_fraction": 0.7289719581604004, "alphanum_fraction": 0.7289719581604004, "avg_line_length": 16.83333396911621, "blob_id": "6a6a52ec27de3a2dd1d674ddca5f5dca6313195c", "content_id": "36e8ab114efb51814a49a82b6fb87f30d59fdb49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 107, "license_type": "no_license", "max_line_length": 46, "num_lines": 6, "path": "/projects/bs/get-credit-card-numbers.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ntail ${gDD}/data/credit-card-numbers.txt\n${xec}/projects/bs/get-credit-card-number.bash\n\nexit\n" }, { "alpha_fraction": 0.6318181753158569, "alphanum_fraction": 0.637499988079071, "avg_line_length": 26.5, "blob_id": "98f0057bfec404e0de494e00ce1e33e17f7e3df8", "content_id": "647cc4776214716adb94dfd04df997bf0ace497a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 880, "license_type": "no_license", "max_line_length": 92, "num_lines": 32, "path": "/password/generate.py", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys, subprocess\nfrom random import choice\n\n# copy to clipboard\ndef clipboard(data):\n process = subprocess.Popen('pbcopy', env={'LANG': 'en_US.UTF-8'}, stdin=subprocess.PIPE)\n process.communicate(data.encode('utf-8'))\n\n# generate password composed of character set chunks\ndef password(charsets, length):\n password = ''\n remaining = length\n\n while remaining > 0:\n charset = choice(charsets)\n\n charhash = charset['chars'].split(' ')\n charsetLength = choice(range(charset['min'], charset['max'] + 1))\n\n if (remaining > charsetLength):\n charsetCount = charsetLength\n remaining -= charsetLength\n else:\n charsetCount = remaining\n remaining = 0\n\n password += ''.join([choice(charhash) for i in range(charsetCount)])\n\n clipboard(password)\n print(password)\n" }, { 
"alpha_fraction": 0.6086956262588501, "alphanum_fraction": 0.6594203114509583, "avg_line_length": 24.090909957885742, "blob_id": "5468e857c9bbc8304033957af02c16c007cf2eb2", "content_id": "f649416a9d307bafcb71b42bb0c5055702c76eef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 81, "num_lines": 11, "path": "/when-harvi-was-adas-age.py", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom datetime import date as D\n\nadaBirthday = D(2020, 6, 8)\nadaAge = (D.today() - adaBirthday)\n\nharviBirthday = D(2016, 11, 16)\ndayHarviWasAdasAge = (harviBirthday + adaAge)\n\nprint('\\n ' + str(adaAge.days) + ' days -> ' + str(dayHarviWasAdasAge) + '\\n')\n" }, { "alpha_fraction": 0.3362831771373749, "alphanum_fraction": 0.39996153116226196, "avg_line_length": 66.5064926147461, "blob_id": "7c7ca30fa5b0451b36abf32322c2b6c54cef0da2", "content_id": "105d0713774116ce8c3361791348816a99d6117e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 5198, "license_type": "no_license", "max_line_length": 333, "num_lines": 77, "path": "/ls-ignore.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nbash_exports=''\nbash_aliases=''\n\nif [[ ! -z $bash_exports ]] || [[ ! -z $bash_aliases ]]; then\n source ${xec}/_bootstrap.bash\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# IGNORE STUFF WHEN LISTING\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nif [[ -z \"$1\" ]]; then\n printf '\\n Not really ignoring anything here... try `ls` instead\\n\\n'\n exit\nfi\n\n# # Can't get code below to work\n# for arg in \"$@\"; do\n# ignore=\"${ignore} ! -name '*.${arg}'\"\n# done\n# find . -maxdepth 1$ignore\n\ninput_vars=$#\n\nif [[ $input_vars == 1 ]]; then\n find . -maxdepth 1 ! -name \"*.$1\"\nelif [[ $input_vars == 2 ]]; then\n find . -maxdepth 1 ! -name \"*.$1\" ! -name \"*.$2\"\nelif [[ $input_vars == 3 ]]; then\n find . -maxdepth 1 ! -name \"*.$1\" ! -name \"*.$2\" ! -name \"*.$3\"\nelif [[ $input_vars == 4 ]]; then\n find . -maxdepth 1 ! -name \"*.$1\" ! -name \"*.$2\" ! -name \"*.$3\" ! -name \"*.$4\"\nelif [[ $input_vars == 5 ]]; then\n find . -maxdepth 1 ! -name \"*.$1\" ! -name \"*.$2\" ! -name \"*.$3\" ! -name \"*.$4\" ! -name \"*.$5\"\nelif [[ $input_vars == 6 ]]; then\n find . -maxdepth 1 ! -name \"*.$1\" ! -name \"*.$2\" ! -name \"*.$3\" ! -name \"*.$4\" ! -name \"*.$5\" ! -name \"*.$6\"\nelif [[ $input_vars == 7 ]]; then\n find . -maxdepth 1 ! -name \"*.$1\" ! -name \"*.$2\" ! -name \"*.$3\" ! -name \"*.$4\" ! -name \"*.$5\" ! -name \"*.$6\" ! -name \"*.$7\"\nelif [[ $input_vars == 8 ]]; then\n find . -maxdepth 1 ! -name \"*.$1\" ! -name \"*.$2\" ! -name \"*.$3\" ! -name \"*.$4\" ! -name \"*.$5\" ! -name \"*.$6\" ! -name \"*.$7\" ! -name \"*.$8\"\nelif [[ $input_vars == 9 ]]; then\n find . -maxdepth 1 ! -name \"*.$1\" ! -name \"*.$2\" ! -name \"*.$3\" ! -name \"*.$4\" ! -name \"*.$5\" ! -name \"*.$6\" ! -name \"*.$7\" ! -name \"*.$8\" ! -name \"*.$9\"\nelif [[ $input_vars == 10 ]]; then\n find . -maxdepth 1 ! -name \"*.$1\" ! -name \"*.$2\" ! -name \"*.$3\" ! -name \"*.$4\" ! -name \"*.$5\" ! -name \"*.$6\" ! -name \"*.$7\" ! -name \"*.$8\" ! 
-name \"*.$9\" ! -name \"*.${10}\"\nelse\n    # for more than 10 arguments, build the exclusion list as an array\n    # (this is the quoting-safe version of the commented-out loop above)\n    ignore=()\n    for arg in \"$@\"; do\n        ignore+=( ! -name \"*.${arg}\" )\n    done\n    find . -maxdepth 1 \"${ignore[@]}\"\nfi\n\nexit\n" }, {
"alpha_fraction": 0.3757225573062897, "alphanum_fraction": 0.3786127269268036, "avg_line_length": 20.625, "blob_id": "379d553c28c951b72925150c3df40e7927fcfef9", "content_id": "520645ed06530823e65f99e09ee9b97e692714b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 692, "license_type": "no_license", "max_line_length": 88, "num_lines": 32, "path": "/config.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT (disabled because none of this will be available at this point)\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# pass\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# CONFIGURE TERMINAL ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nif [[ -n \"$1\" ]]; then\n    input=\"$1\"\nelse\n    echo -n \"Environment: \"\n    read input\nfi\n\necho ''\n\nrm -fr ~/.bashrc\ncp ~/bash/config/${input}.bashrc ~/.bashrc\n\nrm -fr ~/.vimrc\ncp ~/.vim/config/${input}.vimrc ~/.vimrc\n\necho 'Be sure to:'\necho 'source ~/.bashrc'\necho ''\n" }, {
"alpha_fraction": 0.3674698770046234, "alphanum_fraction": 0.3674698770046234, "avg_line_length": 24.538461685180664, "blob_id": "6c852a3b3554ec8d9cb9aac856798a1094a78c19", "content_id": "d51252d195f1ae50923f88201168ad8c3ca4917f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 332, "license_type": "no_license", "max_line_length": 93, "num_lines": 13, "path": "/projects/fg/phoenix-db-reset-passwords.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# LOAD LOS DB\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nhost='fg_mysql'\nuser='root'\npassword='financegenius'\ndatabase='fg_los'\n\nmysql -h ${host} -u ${user} -p${password} -D ${database} < ${xec}/fg/reset-passwords-to-w.sql\n" }, {
"alpha_fraction": 0.5421276688575745, "alphanum_fraction": 0.5438297986984253, "avg_line_length": 23.5, "blob_id": "6bb92e9e18b0510249d5d206e08062007ffbbd3e", "content_id": "7ec17764929c35df9cef23ada9905a4359ae5748", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1175, "license_type": "no_license", "max_line_length": 69, "num_lines": 50, "path": "/projects/bs/repair-vagrant.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nsource ${xec}/verify-bash-variables.bash\n\n# validate all variables\nverify_bash_exports gR gDD\nbash_exports_valid=$?\n\n# validate all aliases\nverify_bash_aliases\nbash_aliases_valid=$?\n\nif [[ 0 = ${bash_exports_valid} || 0 = ${bash_aliases_valid} ]]; then\n    exit\nfi\n\nshopt -s expand_aliases\nsource ~/.bashrc\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# REASSOCIATE VM\n# 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nvagrantCommandPath='/usr/local/bin/vagrant'\nvirtualboxFolderPath=\"${gR}/.vagrant/machines/default/virtualbox\"\nvirtualboxConfigPath=\"${gDD}/conf/virtualbox\"\n\ncd ${gR}\n\nif [[ ! -f \"${virtualboxFolderPath}/private_key\" ]]; then\n # run vagrant status to create nonexistent directory\n ${vagrantCommandPath} status\n\n # copy config in if private_key nonexistent\n cp ${virtualboxConfigPath}/* ${virtualboxFolderPath}\n\n echo ''\n echo '...reassociating VM with VirtualBox'\n echo ''\nfi\n\n${vagrantCommandPath} status\n\nexit\n" }, { "alpha_fraction": 0.6379310488700867, "alphanum_fraction": 0.6379310488700867, "avg_line_length": 13.5, "blob_id": "5ff5763829193d91cd86dfb4839d7b9fcbc68d79", "content_id": "b9f7b79f1ee3d6a02059f12b31ac84ec3b74c87e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 116, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/projects/fg/phoenix_update_cscope.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncd ${ll}/sites/phoenix_beta\n\nfind . -name \"*.php\" > cscope.files\ncscope -q -R -b -i cscope.files\n\nexit\n" }, { "alpha_fraction": 0.5172955989837646, "alphanum_fraction": 0.5314465165138245, "avg_line_length": 17.171428680419922, "blob_id": "3816de2ce77978947f0f712f56cfc0ca42e06768", "content_id": "9faf05b9f613abcf7b596bc40f483606951c1b6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 636, "license_type": "no_license", "max_line_length": 60, "num_lines": 35, "path": "/verify-zsh-variables.zsh", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/zsh\n\nsource ~/.zshrc\n\n\n# return 1 if passed in strings are exported variables\n# ...0 otherwise\nfunction verify_zsh_exports {\n valid=1\n\n for var in \"$@\"; do\n if [[ ! -v ${var} ]]; then\n echo \" \\$${var} has not been exported\"\n valid=0\n fi\n done\n\n return ${valid}\n}\n\n\n# return 1 if passed in strings are properly aliased\n# ...0 otherwise\nfunction verify_zsh_aliases {\n valid=1\n\n for var in \"$@\"; do\n if [[ ! 
`alias ${var} 2>/dev/null` ]]; then\n echo \" '${var}' has not been properly aliased\"\n valid=0\n fi\n done\n\n return ${valid}\n}\n" }, { "alpha_fraction": 0.469255656003952, "alphanum_fraction": 0.47141316533088684, "avg_line_length": 20.090909957885742, "blob_id": "c4099a84c834b841b2849c4ee3eb6eefc80dc16c", "content_id": "42784e0484f1790ab62e0578ed0b8be4c270501a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 927, "license_type": "no_license", "max_line_length": 80, "num_lines": 44, "path": "/projects/bs/bs-core-db-dump.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nsource ${xec}/verify-bash-variables.bash\n\n# validate all variables\nverify_bash_exports gR\nbash_exports_valid=$?\n\n# validate all aliases\nverify_bash_aliases\nbash_aliases_valid=$?\n\nif [[ 0 = ${bash_exports_valid} || 0 = ${bash_aliases_valid} ]]; then\n exit\nfi\n\nshopt -s expand_aliases\nsource ~/.bashrc\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# DUMP BS DB\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nuser='root'\npassword='password'\ndatabase='bitesquad'\n\ngR\n\necho ''\necho '...dumping current DB'\nmysqldump -u${user} -p${password} ${database} > `date +%Y-%m-%d`_${database}.sql\ngzip `date +%Y-%m-%d`_${database}.sql\necho ''\necho '...now invoke `bsdbfetch` from the host machine'\necho ''\n\nexit" }, { "alpha_fraction": 0.26135656237602234, "alphanum_fraction": 0.2831362783908844, "avg_line_length": 19.087499618530273, "blob_id": "ea0da9317facb741aa52dbab6fe46439343c92f4", "content_id": "53e858753fa53224b89bae2e82fb4c75dfe1c339", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3214, "license_type": "no_license", "max_line_length": 73, "num_lines": 160, "path": "/password/character.py", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# character lists\nnumbers = '0 1 2 3 4 5 6 7 8 9'\nalphasLower = 'a b c d e f g h i j k l m n o p q r s t u v w x y z'\nalphasUpper = 'A B C D E F G H I J K L M N O P Q R S T U V W X Y Z'\nspecials = '! @ # $ % ^ & * ( ) - _ = + | { } [ ] ; : \\' \" < . , > / ?'\nspecialsAbridged = '_ ! . & @'\niosSpecialsPageOne = '0 1 2 3 4 5 6 7 8 9 - / : ; ( ) $ & @ \" . , ? ! \\''\niosSpecialsPageTwo = '[ ] { } # % ^ * + = _ | ~ < > . , ? ! \\''\n\nspecialsRoundpoint = '! # $ % ( ) * @ ^ |'\n\n# dynamic character lists\ncustomSpecials = '! 
# $ % + / = @ ~'\n\n# character sets\nsets = {\n 'alpha': [\n {\n \"chars\": alphasLower,\n \"min\": 3,\n \"max\": 5\n },\n {\n \"chars\": alphasUpper,\n \"min\": 3,\n \"max\": 5\n }\n ],\n 'numeric': [\n {\n \"chars\": numbers,\n \"min\": 3,\n \"max\": 5\n }\n ],\n 'alphanumeric': [\n {\n \"chars\": alphasLower,\n \"min\": 3,\n \"max\": 5\n },\n {\n \"chars\": alphasUpper,\n \"min\": 3,\n \"max\": 5\n },\n {\n \"chars\": numbers,\n \"min\": 3,\n \"max\": 5\n }\n ],\n 'all': [\n {\n \"chars\": alphasLower,\n \"min\": 3,\n \"max\": 5\n },\n {\n \"chars\": alphasUpper,\n \"min\": 3,\n \"max\": 5\n },\n {\n \"chars\": numbers,\n \"min\": 3,\n \"max\": 5\n },\n {\n \"chars\": specials,\n \"min\": 3,\n \"max\": 5\n }\n ],\n 'abridged': [\n {\n \"chars\": alphasLower,\n \"min\": 3,\n \"max\": 5\n },\n {\n \"chars\": alphasUpper,\n \"min\": 3,\n \"max\": 5\n },\n {\n \"chars\": numbers,\n \"min\": 3,\n \"max\": 5\n },\n {\n \"chars\": specialsAbridged,\n \"min\": 3,\n \"max\": 5\n }\n ],\n 'ios': [\n {\n \"chars\": alphasLower,\n \"min\": 3,\n \"max\": 5\n },\n {\n \"chars\": alphasUpper,\n \"min\": 1,\n \"max\": 1\n },\n {\n \"chars\": iosSpecialsPageOne,\n \"min\": 3,\n \"max\": 4\n },\n {\n \"chars\": iosSpecialsPageTwo,\n \"min\": 3,\n \"max\": 3\n }\n ],\n 'custom': [\n {\n \"chars\": alphasLower,\n \"min\": 3,\n \"max\": 5\n },\n {\n \"chars\": alphasUpper,\n \"min\": 1,\n \"max\": 1\n },\n {\n \"chars\": numbers,\n \"min\": 3,\n \"max\": 5\n }\n ],\n 'roundpoint': [\n {\n \"chars\": alphasLower,\n \"min\": 3,\n \"max\": 5\n },\n {\n \"chars\": alphasUpper,\n \"min\": 3,\n \"max\": 5\n },\n {\n \"chars\": numbers,\n \"min\": 3,\n \"max\": 5\n },\n {\n \"chars\": specialsRoundpoint,\n \"min\": 3,\n \"max\": 5\n }\n ],\n}\n" }, { "alpha_fraction": 0.4702380895614624, "alphanum_fraction": 0.4753401279449463, "avg_line_length": 18.94915199279785, "blob_id": "dca93763f017f92bc7355c3fe668b03e1085b45c", "content_id": "4aaa2b17b860ae163263b0497b02f318c9369750", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1176, "license_type": "no_license", "max_line_length": 98, "num_lines": 59, "path": "/projects/fg/phoenix-db-load.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nsource ${xec}/verify-bash-variables.bash\n\n# validate all variables\nverify_bash_exports gr\nbash_exports_valid=$?\n\n# validate all aliases\nverify_bash_aliases\nbash_aliases_valid=$?\n\nif [[ 0 = ${bash_exports_valid} || 0 = ${bash_aliases_valid} ]]; then\n exit\nfi\n\nshopt -s expand_aliases\nsource ~/.bashrc\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# LOAD LOS DB\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nhost='fg_mysql'\nuser='root'\npassword='financegenius'\ndatabase='fg_los'\n\nif [[ -z \"$1\" ]]; then\n echo 'Please supply the SQL input file as an argument'\n exit\nfi\ninput=\"$1\"\n\ndoDump=true\nif [[ -n \"$2\" ]]; then\n if [[ 'false' = $2 ]]; then\n doDump=false\n fi\nfi\n\necho ''\n\nif $doDump; then\n echo '...dumping current DB'\n mysqldump -h ${host} -u ${user} -p${password} ${database} > ${gr}/`date +%Y-%m-%d`_phoenix.sql\nfi\n\necho \"...loading $input\"\nmysql -h ${host} -u ${user} -p${password} -D ${database} < $input\necho ''\n\nexit" }, { "alpha_fraction": 0.6102088093757629, 
"alphanum_fraction": 0.621809720993042, "avg_line_length": 19.5238094329834, "blob_id": "d5054b755b852948fa8377ad9b38fb4bf6b85c12", "content_id": "0729ef9b8608f563cd2f16c52043267cde2f9b04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 431, "license_type": "no_license", "max_line_length": 50, "num_lines": 21, "path": "/password/cpass.py", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nimport sys, character, generate\nfrom pprint import pprint\n\n# set defaults\nsetName = 'ios'\nlength = 16\n\n# interpret input: order doesn't matter\nargs = sys.argv\ndel args[0]\nif (len(args) > 0):\n for arg in sys.argv:\n try:\n if isinstance(int(arg), (int)):\n length = int(arg)\n except Exception:\n setName = arg\n\ngenerate.password(character.sets[setName], length)\n" }, { "alpha_fraction": 0.48595213890075684, "alphanum_fraction": 0.4880332946777344, "avg_line_length": 15.151260375976562, "blob_id": "01c0de7ed82aa469a07ea52723bddb4cb0d63aea", "content_id": "cbf46333dcbb0899a471742349562fbdf571fc79", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1922, "license_type": "no_license", "max_line_length": 69, "num_lines": 119, "path": "/environments/wordpress.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nsource ${xec}/verify-bash-variables.bash\n\n# validate all variables\nverify_bash_exports home\nbash_exports_valid=$?\n\n# validate all aliases\nverify_bash_aliases\nbash_aliases_valid=$?\n\nif [[ 0 = ${bash_exports_valid} || 0 = ${bash_aliases_valid} ]]; then\n return\nfi\n\nshopt -s expand_aliases\nsource ~/.bashrc\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# SET WORDPRESS ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# environment\nroot='sites'\n# root='public'\n\nif [[ -z \"$1\" ]]; then\n echo 'Which project? 
'\n    read env\nelse\n    env=\"$1\"\nfi\n\n# remove all slashes\nenv=`echo ${env} | sed 's/[/]//g'`\n\n# extract the project name\nproj=`echo ${env} | sed 's/_.*//'`\n\n# set root\ncd ${home}/${root}/${env}\nsetR\n\n# set assets\nif [ -d \"${home}/assets/${env}\" ]; then\n    cd ${home}/assets/${env}\n    year=`date \"+%Y\"`\n\n    if [ -d $year ]; then\n        cd ${year}\n    fi\n\n    setAA\nfi\n\ncd ${gR}/site\n\n# set plugins\nif [[ -d 'wp-content' || -d 'content' ]]; then\n    cd ${gR}/site/*content*/plugins\n    setp\nelif [ -d 'assets' ]; then\n    cd ${gR}/site/*assets*/plugins\n    setp\nfi\n\ncd ../themes\nsett\n\n# set parent theme (fall back to the first directory matching *skeleton*;\n# a quoted '*skeleton*' would only ever match a literal directory of that name)\nskeleton_matches=( *skeleton* )\nif [ -d 'skeleton' ]; then\n    cd skeleton\n    sethh\n    cd ..\nelif [ -d \"${skeleton_matches[0]}\" ]; then\n    cd \"${skeleton_matches[0]}\"\n    sethh\n    cd ..\nfi\n\nfound_home=false\n\n# set home\nif [ -d ${env} ]; then\n    found_home=true\n    cd ${env}\n    seth\nelif [ -d ${proj} ]; then\n    found_home=true\n    cd ${proj}\n    seth\nelse\n    echo 'Could not locate content directory'\nfi\n\n# set style\nif ${found_home}; then\n    cd _\n    if [ -d 'scss' ]; then\n        cd scss\n        setx\n    elif [ -d 'sass' ]; then\n        cd sass\n        setx\n    fi\nfi\n\nif ${found_home}; then\n    gh\nelse\n    gt\nfi\n" }, {
"alpha_fraction": 0.395061731338501, "alphanum_fraction": 0.3955555558204651, "avg_line_length": 25.3125, "blob_id": "d728671621db837792a1ef54a133d37841ecfe2a", "content_id": "78c8fa1455c9113f839a6a395b508975a0dd8e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2025, "license_type": "no_license", "max_line_length": 75, "num_lines": 80, "path": "/envs-set.zsh", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/zsh\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# VERIFY ENVIRONMENT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nzsh_exports=''\nzsh_aliases=''\n\nif [[ ! -z $zsh_exports ]] || [[ ! -z $zsh_aliases ]]; then\n    source ${xec}/_bootstrap.zsh\nfi\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# SET SHELL NAVIGATION VARIABLES\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndoSet=false\n\nif [[ -f ./_env.zsh || -f ./_env.bash ]]; then\n    isZshEnvFile=false\n    if [ -f ./_env.zsh ]; then\n        isZshEnvFile=true\n    fi\n\n    if [[ `alias gA 2>/dev/null` ]]; then\n        oldZshEnvFile=false\n        if [ -f ${gA}/_env.zsh ]; then\n            oldZshEnvFile=\"${gA}/_env.zsh\"\n        elif [ -f ${gA}/_env.bash ]; then\n            oldZshEnvFile=\"${gA}/_env.bash\"\n        fi\n\n        if [ \"${oldZshEnvFile}\" != false ] ; then\n            envsDiff=$(${xec}/envs-diff.zsh ${oldZshEnvFile})\n            if [[ -n \"${envsDiff}\" ]]; then\n                echo ''\n                echo ${envsDiff}\n                echo 'There are unsaved changes in the current environment'\n                echo ''\n            else\n                doSet=true\n            fi\n        fi\n    else\n        envsReport=$(${xec}/envs-report.zsh)\n        if [[ -n \"${envsReport}\" ]]; then\n            echo ''\n            echo ${envsReport}\n            echo 'Overwrite ^^ current environment? 
y/n'\n echo ''\n\n read doOverwrite\n if [[ \"${doOverwrite}\" = 'y' || \"${doOverwrite}\" = 'Y' ]]; then\n doSet=true\n fi\n else\n doSet=true\n fi\n fi\n\n if [ \"${doSet}\" = true ] ; then\n source ${xec}/envs-unset.zsh\n\n if [ \"${isZshEnvFile}\" = true ] ; then\n source ./_env.zsh\n else\n source ./_env.bash\n fi \n\n echo ' ...new environment set'\n echo ''\n fi \nelse\n echo ''\n echo 'Please add `_env.zsh`'\n echo ''\nfi\n" }, { "alpha_fraction": 0.6438356041908264, "alphanum_fraction": 0.6438356041908264, "avg_line_length": 11.166666984558105, "blob_id": "dee794d27ebd1af0bb496108705614a1f115ed1f", "content_id": "789ed2c69ef0ead91d0f0c5339c89b9f9edf9a70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 73, "license_type": "no_license", "max_line_length": 27, "num_lines": 6, "path": "/projects/fg/get-ssns-rg.bash", "repo_name": "ssteele/xecutables", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ntail ${gD}/data/ssns-rg.txt\n${xec}/fg/get-ssn-rg.bash\n\nexit\n" } ]
67
aramrashduni/mwtc-nd19
https://github.com/aramrashduni/mwtc-nd19
06bbf95d5fdf482a3fb02fcb7b9a66a15d3da647
4832ebf05a6d63b052ef79c1f74212dd893d5e27
750aaa1fa9aac943cb0a36be60db6d0361f0625f
refs/heads/master
2022-02-02T15:50:28.270885
2019-04-17T13:28:27
2019-04-17T13:28:27
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.641381025314331, "alphanum_fraction": 0.6620463728904724, "avg_line_length": 32.62711715698242, "blob_id": "75819bed2e70e38745b98af3f2be2dcde43ae1df", "content_id": "3b81d48241557449f894937d7784cea69c6268e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3968, "license_type": "no_license", "max_line_length": 109, "num_lines": 118, "path": "/case3/covariance_matrix.py", "repo_name": "aramrashduni/mwtc-nd19", "src_encoding": "UTF-8", "text": "import torch\n# from strategy import view_data\nimport numpy as np\nimport cvxopt as opt\nfrom cvxopt import blas, solvers\n\n# import matplotlib.pyplot as plt\n\n#for testing\n# import datetime\n\n# all_price_data_np, all_feature_data_np, feature_names_np = view_data('C3_train.pkl')\n# all_price_data = torch.from_numpy(all_price_data_np).clone().float()\n# all_feature_data = torch.from_numpy(all_feature_data_np).clone().float()\nday = 756\n\ndef update_data(price_arr, feature_arr):\n new_price_data = torch.cat(np.array([all_price_data, torch.from_numpy(price_arr).clone().float()]))\n new_feature_data = torch.cat(np.array([all_feature_data, torch.from_numpy(feature_arr).clone().float()]))\n\n\n# takes a price or feature vector and gives log returns vector of size 1 smaller\ndef calc_log_returns(vector):\n return (vector[1:] / vector[:-1]).log()\n\ndef covariance(x,y):\n return ((x-x.mean())*(y-y.mean())).sum() / (x.size(0)-1)\n\ndef cov_matrix(tensor):\n new_tensor = torch.zeros_like(tensor)\n for col in range(tensor.size(1)):\n new_tensor[:,col] = tensor[:,col] - tensor[:,col].mean()\n return torch.mm(new_tensor.t(), new_tensor) / (new_tensor.size(0)-1)\n\n\ndef generate_return_data():\n return_data = torch.zeros(all_price_data.size(0)-1, all_price_data.size(1))\n for ticker in range(all_price_data.size(1)):\n return_data[:,ticker] = calc_log_returns(all_price_data[:,ticker])\n return return_data\n\n\ndef get_return_vec(return_data_t):\n return [sum(return_data_t[i])/len(return_data_t[i]) for i in range(len(return_data_t))]\n\n#\n# def get_cov_matrix(return_covariances):\n# return return_covariances.numpy()\n\n# Now solve the Markowitz Problem with our return covariances\ndef optimal_portfolio(returns, N, covs):\n n = len(returns)\n returns = np.asmatrix(returns).astype(np.double)\n\n # N = 100\n mus = [10.**(5.0 * t/N - 1.0) for t in range(N)]\n\n # Convert to cvxopt matrices\n cov = covs.numpy().astype(np.double)\n S = opt.matrix(cov)\n pbar = opt.matrix(returns.reshape(-1,1))\n\n # Create constraint matrices\n G = -opt.matrix(np.eye(n)) # negative n x n identity matrix\n h = opt.matrix(0.0, (n ,1))\n A = opt.matrix(1.0, (1, n))\n b = opt.matrix(1.0)\n\n # Calculate efficient frontier weights using quadratic programming\n solvers.options['show_progress'] = False\n portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x'] for mu in mus]\n\n ## CALCULATE RISKS AND RETURNS FOR FRONTIER\n returns = [blas.dot(pbar, x) for x in portfolios]\n risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios]\n ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE\n m1 = np.polyfit(returns, risks, 2)\n # print(m1[2])\n # print(m1[0])\n # x1 = np.sqrt(m1[2] / m1[0])\n x1=0\n\n # CALCULATE THE OPTIMAL PORTFOLIO\n wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x']\n return np.asarray(wt), returns, risks\n\n# time1 = datetime.datetime.now()\n# weights1, returns1, risks1 = optimal_portfolio(return_data_t, 100)\n# diff1 = (datetime.datetime.now() - 
time1).total_seconds()*1000\n#\n# time2 = datetime.datetime.now()\n# weights2, returns2, risks2 = optimal_portfolio(return_data_t, 30)\n# diff2 = (datetime.datetime.now() - time2).total_seconds()*1000\n#\n# plt.plot(stds, means, 'o')\n# plt.ylabel('mean')\n# plt.xlabel('std')\n# plt.plot(risks, returns, 'y-o')\n# print(diff1)\n# print(diff2)\n# plt.show()\n#\n#\n# def portfolio_return(weights, returns):\n# return torch.dot(weights, returns)\n#\n#\n# # REACT TO UPDATE\n# def handle(inx, price, factors):\n# day = 756+inx\n# all_price_data, all_feature_data = update_data(price, factors)\n# price_covariances = cov_matrix(all_price_data)\n# return_data = generate_return_data()\n# return_data_t = return_data.t()\n# return_vec = get_return_vec(return_data_t)\n# return_covariances = cov_matrix(return_data)\n# weights, returns, risks = optimal_portfolio(return_data_t, 30, return_covariances)\n# return weights\n" }, { "alpha_fraction": 0.6416308879852295, "alphanum_fraction": 0.6444921493530273, "avg_line_length": 33.95000076293945, "blob_id": "7b6b458076950dde10c970998327e74d4db9639b", "content_id": "3878e259854c4cc1a9347db53a3f9722d9001684", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1398, "license_type": "no_license", "max_line_length": 105, "num_lines": 40, "path": "/case3/model_classes.py", "repo_name": "aramrashduni/mwtc-nd19", "src_encoding": "UTF-8", "text": "# This file creates Pytorch modules useful in constructing features\nimport torch\n\n# returns a product of powers of the inputs\nclass ProductLayer(torch.nn.Module):\n def __init__(self, d_in, d_out, scalar_mult=False):\n super(ProductLayer, self).__init__()\n self.layer = torch.nn.Linear(d_in, d_out, bias=scalar_mult)\n\n # pseudo-logarithm function that is used for multiplying absolute values\n # without producing huge negative values for the logarithm\n def plog(self, input_tensor):\n # eliminate values too close to 0\n nonzero = input_tensor.abs().clamp(1e-5)\n return nonzero.log()\n\n def forward(self, input):\n input = self.plog(input)\n inter = self.layer(input)\n output = inter.exp()\n return output\n\nclass LinearLayer(torch.nn.Module):\n def __init__(self, d_in, d_out, bias=True):\n super(LinearLayer, self).__init__()\n self.layer = torch.nn.Linear(d_in, d_out, bias)\n\n def forward(self, input):\n output = self.layer(input)\n return output\n\n# some features you might not have to take the log of to be predictive, so concatenate logs with standard\nclass LogOutput(torch.nn.Module):\n def __init__(self):\n super(LogOutput, self).__init__()\n\n def forward(self, input):\n # output = torch.cat( (input, input.log()) , dim=1)\n output = input.log()\n return output\n" }, { "alpha_fraction": 0.7617865800857544, "alphanum_fraction": 0.7841191291809082, "avg_line_length": 43.77777862548828, "blob_id": "0ccefa0a2d895b78f25dab091c7ead09774695af", "content_id": "44a79c4c6e2e7356d595a18b00f4feb9905ce4c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 403, "license_type": "no_license", "max_line_length": 77, "num_lines": 9, "path": "/case3/README.txt", "repo_name": "aramrashduni/mwtc-nd19", "src_encoding": "UTF-8", "text": "All code is written in Python 3.6+\n\nFollowing instruction, we have not included the C3_train.pkl data file in the\nsubmission repository, though it must be included in the case3/ directory for\nour program to work. 
Please include C3_train.pkl in mwtc-nd19/case3/ to run\nthe strategy.\n\nTo install the torch module, please run \"pip install torch\" (or \"pip3\ninstall torch\", depending on your version of pip)\n" }, {
"alpha_fraction": 0.5920680165290833, "alphanum_fraction": 0.6116392016410828, "avg_line_length": 44.739131927490234, "blob_id": "efc885a29525e59f4c96feaa1efe9fda1897f885", "content_id": "3050a04535fec8f9d34fa900454f9eca6eabf3fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3177, "license_type": "no_license", "max_line_length": 129, "num_lines": 81, "path": "/case3/strategy.py", "repo_name": "aramrashduni/mwtc-nd19", "src_encoding": "UTF-8", "text": "import torch\nimport numpy as np\nimport pickle\nimport covariance_matrix as cm\nfrom model_classes import *\n\n\ndef load_object(file_name):\n    \"\"\"load the pickled object\"\"\"\n    with open(file_name, 'rb') as f:\n        return pickle.load(f)\n\n\ndef view_data(data_path):\n    data = load_object(data_path)\n    prices = data['prices']\n    names = data['features']['names']\n    features = data['features']['values']\n    # print(prices.shape)\n    # print(names)\n    # print(features.shape)\n    return prices, features\n\n\nclass Strategy():\n    def __init__(self):\n        self.all_price_data, self.all_feature_data = view_data('C3_train.pkl')\n        self.all_price_data = torch.from_numpy(self.all_price_data).clone().float()[:-1,:]\n        self.all_feature_data = torch.from_numpy(self.all_feature_data).clone().float()\n        self.all_return_data = torch.zeros(self.all_price_data.size(0)-1, self.all_price_data.size(1))\n        for col in range(self.all_price_data.size(1)):\n            self.all_return_data[:,col] = cm.calc_log_returns(self.all_price_data[:,col])\n\n        self.models = []\n        for ticker in range(680):\n            stock_model = torch.nn.Sequential(\n                LinearLayer(2,2),\n                ProductLayer(2,1),\n                LogOutput(),\n            )\n            stock_model.load_state_dict(torch.load(f'final_models/{ticker}.pt'))\n            self.models.append(stock_model)\n\n    # add new numpy data to tensor data\n    def update_data(self, price_data, feature_data):\n        self.all_price_data = torch.cat( (self.all_price_data, torch.from_numpy(price_data).float().unsqueeze(0) ) , dim=0)\n        self.all_feature_data = torch.cat( (self.all_feature_data, torch.from_numpy(feature_data).float().unsqueeze(0) ) , dim=0)\n        new_return_data = torch.zeros(self.all_price_data.size(0)-1, self.all_price_data.size(1))\n        for col in range(self.all_price_data.size(1)):\n            new_return_data[:,col] = cm.calc_log_returns(self.all_price_data[:,col])\n        self.all_return_data = new_return_data.clone()\n\n    def handle_update(self, inx, price, factors):\n        \"\"\"Put your logic here\n        Args:\n            inx: zero-based inx in days\n            price: [num_assets, ]\n            factors: [num_assets, num_factors]\n        Return:\n            allocation: [num_assets, ]\n        \"\"\"\n        self.update_data(price, factors)\n        all_rsi_data = self.all_feature_data[:,:,4]\n\n        predicted_returns = []\n        for ticker in range(680):\n            rsi_data = all_rsi_data[:,ticker]*.01\n            # current, previous\n            rsi_inputs = torch.tensor([rsi_data[-1], rsi_data[-2]])\n            model = self.models[ticker]\n            pred_return = model(rsi_inputs).item()\n            predicted_returns.append(pred_return)\n\n        covariances = cm.cov_matrix(self.all_return_data)\n\n        pred_returns_array = np.array(predicted_returns)#.reshape(1,-1)\n        allocation, _, _ = cm.optimal_portfolio(pred_returns_array, 4, covariances)\n        allocation = allocation.astype(np.float).flatten()\n        assert price.shape[0] == factors.shape[0]\n        # return np.array([1.0] * price.shape[0])\n        return allocation\n" }, { 
"alpha_fraction": 0.5670992732048035, "alphanum_fraction": 0.5796093344688416, "avg_line_length": 39.95068359375, "blob_id": "9264353c7917b001c93344ec76f8e1b5e5acf4d2", "content_id": "76eb85b9c729e051066a5bbae41c99ce3ec32891", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14948, "license_type": "no_license", "max_line_length": 169, "num_lines": 365, "path": "/case2/src/nd_final.py", "repo_name": "aramrashduni/mwtc-nd19", "src_encoding": "UTF-8", "text": "\nimport argparse\nimport random\n\nimport sys\nimport py_vollib.black_scholes as bs\nfrom py_vollib.black_scholes import implied_volatility\nimport datetime\nimport py_vollib.black_scholes.greeks.analytical as bsga\nimport py_vollib.black_scholes.greeks.numerical as bsgn\nimport numpy as np\nimport multiprocessing as mp\nimport math\nfrom client.exchange_service.client import BaseExchangeServerClient\nfrom protos.order_book_pb2 import Order\nfrom protos.service_pb2 import PlaceOrderResponse\n\n\ndef _get_spread_scaled(bsp, wmp, ask, bid):\n x3 = ask - wmp\n x2 = wmp - bsp\n x1 = bsp - bid\n # print(bid, ask)\n ask = ask - (x2/(bid-ask))*x3\n bid = bid - (x1/(bid-ask))*x1\n return (ask - bid, (ask + bid)/2)\n\ndef _get_spread_logistic(volatility, best_spread, min_spread=0.02):\n epsilon = 0.05\n K = best_spread + epsilon # maximum spread\n Q = 1\n shift = 0.03 # init shift to accout for positive sigma, ARBITRARY (can be adjusted)\n nu = 0.7 # small nu make larger growth rates near larger sigma (and smaller for smaller)\n r = 0.7 # growth rate\n return min_spread + (K - min_spread)/(1+Q*math.exp(-r*volatility))**(1/nu)\n\n\n\ndef get_qty(*args, method=\"discrep\"):\n ''' arg:\n 0: spread\n 1: price discrepency\n '''\n if method == \"discrep\":\n # factors to determine scaling of qty with spread and price discrepency\n SPREAD_FACTOR = 10\n DISCREP_FACTOR = 0\n # print(args[0]*SPREAD_FACTOR + args[1]*DISCREP_FACTOR)\n return args[0]*SPREAD_FACTOR + args[1]*DISCREP_FACTOR\n\n\n# computes weighted price\ndef weighted_price(mid_price, imbalance, best_ask, best_bid):\n return imbalance*best_ask + (1-imbalance)*best_bid\n\n\nclass NDMarketMaker(BaseExchangeServerClient):\n\n\n def __init_asset_codes(self):\n asset_codes = {}\n for code in self.codes:\n asset_codes[code] = {}\n asset_codes[code][\"strike\"] = int(code[1:-3])\n asset_codes[code][\"vol\"] = 8.*abs(math.log(100./asset_codes[code][\"strike\"]))\n asset_codes[code][\"price\"] = bs.black_scholes(code[0].lower(), 100, asset_codes[code][\"strike\"],\n 0.25, 0, asset_codes[code][\"vol\"])\n return asset_codes\n\n \n def __init_inventory(self):\n inventory = {}\n for code in self.codes:\n inventory[code] = 0\n inventory[self.underlying_code] = 0\n return inventory\n\n\n def __init__(self, *args, **kwargs):\n self.START_TIME = datetime.datetime.now()\n BaseExchangeServerClient.__init__(self, *args, **kwargs)\n\n # self.codes = [\"C100PHX\", \"P100PHX\",]\n self.codes = [\"C98PHX\" , \"P98PHX\",\n \"C99PHX\", \"P99PHX\", \n \"C100PHX\", \"P100PHX\", \n \"C101PHX\", \"P101PHX\", \n \"C102PHX\", \"P102PHX\",]\n self._orderids = {}\n self.asset_codes = self.__init_asset_codes()\n self.underlying_code = \"IDX#PHX\"\n self.underlying_price = 100\n self.inventory = self.__init_inventory()\n self.potential_inventory = self.__init_inventory()\n self.ticks = [[0]] \n self.tick = 0\n self.order_pops = set([])\n\n\n # more complicated than we thought\n # as of now, assumes assets are uncorrelated\n def _get_vega(self, code):\n return 
bsgn.vega(code[0].lower(), self.underlying_price, self.asset_codes[code][\"strike\"], self.get_time_to_exp(),0,self.asset_codes[code][\"vol\"])\n\n\n def _get_delta(self, code):\n return bsgn.delta(code[0].lower(), self.underlying_price, self.asset_codes[code][\"strike\"], self.get_time_to_exp(),0, self.asset_codes[code][\"vol\"])\n\n\n # consolidate with _get_position_delta to avoid redundancy\n def _get_position_vega(self):\n inv = self.inventory\n vega = 0.\n net_position = sum(list(inv.values()))\n if not net_position: return 0\n for code in self.codes:\n # vega += (self.inventory[code]/net_position)*self._get_vega(code)\n vega += inv[code]*self._get_vega(code)\n return vega \n\n\n def _get_position_delta(self):\n inv = self.potential_inventory\n delta = 0.\n net_position = sum(list(inv.values()))\n if not net_position: return 0\n for code in self.codes:\n # delta += (self.inventory[code]/net_position)*self._get_delta(code)\n delta += inv[code]*self._get_delta(code)\n return delta \n\n def _get_position_delta_actual(self):\n inv = self.inventory\n delta = 0.\n net_position = sum(list(inv.values()))\n if not net_position: return 0\n for code in self.codes:\n # delta += (self.inventory[code]/net_position)*self._get_delta(code)\n delta += inv[code]*self._get_delta(code)\n return delta \n\n def _get_portfolio_delta(self):\n inv = self.potential_inventory\n return self._get_position_delta() + inv[self.underlying_code]\n\n def _get_portfolio_delta_actual(self):\n inv = self.inventory\n return self._get_position_delta_actual() + inv[self.underlying_code]\n\n def _make_order(self, asset_code, quantity, base_price, spread, bid=True):\n quantity = int(quantity if bid else -1*quantity)\n self.potential_inventory[asset_code] += quantity\n return Order(asset_code = asset_code, quantity= quantity,\n order_type = Order.ORDER_LMT,\n price = round(base_price-spread/2 if bid else base_price+spread/2, 2),\n competitor_identifier = self._comp_id)\n\n\n def _make_mkt_order(self, asset_code, quantity):\n self.potential_inventory[asset_code] += quantity\n return Order(asset_code = asset_code, quantity=int(quantity),\n order_type = Order.ORDER_MKT,\n competitor_identifier = self._comp_id)\n\n\n # computes time to expiration\n def get_time_to_exp(self):\n return (3-((datetime.datetime.now()-self.START_TIME).total_seconds()*1./900.))*(1./12.)\n\n\n # hedges delta of whole portfolio\n def rebalance_delta(self):\n option_delta = self._get_portfolio_delta_actual()\n if abs(option_delta) < 0.5:\n return\n order_resp = self.place_order(self._make_mkt_order(self.underlying_code, -option_delta)) # buy(sell) delta underlying for each option exchanged\n # print(\"dReb\", self.tick, \"|\", -option_delta)\n if type(order_resp) != PlaceOrderResponse:\n pass\n # print(4, order_resp)\n else:\n # self._orderids.add(order_resp.order_id)\n # print(order_resp.order_id)\n self._orderids[order_resp.order_id] = -option_delta\n\n def hedge_vega(self, fill):\n code = fill.order.asset_code\n qty = fill.filled_quantity\n vegaP = self._get_position_vega()\n vega = self._get_vega(code)\n hedge_qty = abs(qty)*(-vegaP)/vega\n if abs(hedge_qty) < 0.5: return\n order_resp = self.place_order(self._make_mkt_order(code, hedge_qty)) # buy(sell) -vega/vegaT of each option exchanged\n if type(order_resp) != PlaceOrderResponse:\n pass\n else:\n # print(order_resp.order_id)\n self._orderids[order_resp.order_id] = hedge_qty\n\n\n # In development\n def get_spread(self, *args, method=\"scaled\"):\n ''' Usage:\n Method:\n stoikov\n - arg0: volatility\n 
- arg1: base price\n logistic\n - arg0: vol\n - arg1: price\n '''\n if method == \"stoikov\":\n return args[0]*(self.get_time_to_exp()) + math.log(2)\n elif method == \"scaled\":\n return _get_spread_scaled(args[0],args[1],args[2],args[3])\n elif method == \"logistic\":\n return _get_spread_logistic(args[0], args[1])\n\n\n # gets meaured price, designed so we can easily adjust it\n def get_measured_price(self, *args, method=\"weighted\"):\n if method == \"weighted\":\n return weighted_price(args[0], args[1], args[2], args[3])\n if method == \"mid\":\n return (args[1]-args[2])/2\n\n\n # Sends orders to exchange\n def send_order(self, asset_code, qty, base_price, spread, tick, kind=\"lmt\"):\n qty = int(qty)\n # print(\"tick\",tick)\n if kind == \"lmt\":\n base_price = round(base_price, 2)\n ask_resp = self.place_order(self._make_order(asset_code, qty, base_price, spread, False))\n bid_resp = self.place_order(self._make_order(asset_code, qty, base_price, spread, True))\n \n if type(ask_resp) != PlaceOrderResponse:\n print(ask_resp)\n else:\n # print(ask_resp.order_id)\n self._orderids[ask_resp.order_id] = -qty\n self.ticks[tick].append(ask_resp.order_id)\n \n if type(bid_resp) != PlaceOrderResponse:\n print(bid_resp)\n else:\n # print(bid_resp.order_id)\n self._orderids[bid_resp.order_id] = qty\n self.ticks[tick].append(bid_resp.order_id)\n\n\n\n # Generates then sends orders\n MIN_SPREAD = 0.02\n def generate_limit_order(self, asset_code, measured_price, volatility, best_ask, best_bid, min_spread=MIN_SPREAD):\n bs_price = bs.black_scholes(asset_code[0].lower(), self.underlying_price, self.asset_codes[asset_code][\"strike\"], self.get_time_to_exp(), 0, volatility)\n # spread, base_price = self.get_spread(bs_price, measured_price, best_ask, best_bid, method=\"scaled\")\n # spread, base_price = (self.get_spread(volatility, measured_price, method=\"logistic\"), measured_price) # can switch to stoikov easily\n base_price = measured_price\n curr_best_spread = best_ask - best_bid\n spread = _get_spread_logistic(self.asset_codes[asset_code][\"vol\"], curr_best_spread)\n # spread = curr_best_spread + 0.02\n\n if spread < min_spread: return\n # print(spread - curr_best_spread)\n\n qty = min(get_qty(spread, abs(base_price - bs_price)), 5) + 1\n\n self.send_order(asset_code, qty, base_price, spread, self.tick)\n try:\n self.asset_codes[asset_code][\"vol\"] = bs.implied_volatility.implied_volatility(measured_price, self.underlying_price, self.asset_codes[asset_code][\"strike\"],\n self.get_time_to_exp(), 0, asset_code[0].lower())\n except:\n pass\n\n def handle_exchange_update(self, exchange_update_response):\n ''' Method for handling exchange updates\n - gathers and exchange update and responds\n - creates a process for each symbol\n --> (not robust, may not work, haven't experimented with\n memory sharing/dict access)\n '''\n self.tick += 1\n self.ticks.append([0])\n updates = {}\n for update in exchange_update_response.market_updates:\n updates[update.asset.asset_code] = update\n\n try:\n self.underlying_price = updates.get(self.underlying_code, 0).mid_market_price\n except AttributeError:\n pass\n\n print(\"pnl:\", exchange_update_response.competitor_metadata.pnl)\n deltaP = self._get_portfolio_delta() \n vegaP = self._get_position_vega()\n print(\"delta:\", self._get_portfolio_delta_actual())\n print(\"vega:\", vegaP)\n print(\"inventory:\", self.inventory)\n if exchange_update_response.fills:\n for fill in exchange_update_response.fills:\n # print(\"id\",fill.order.asset_code)\n try:\n qty_filled 
= np.sign(self._orderids[fill.order.order_id])*fill.filled_quantity\n except KeyError:\n print(fill.order.order_id, fill.order.order_id in self.order_pops)\n print(fill.filled_quantity, fill.remaining_quantity)\n sys.exit()\n\n self.inventory[fill.order.asset_code] += qty_filled\n if round(fill.order.remaining_quantity) == 0:\n # self._orderids.pop(fill.order.order_id, None)\n self.order_pops.add(fill.order.order_id)\n if fill.order.order_type == Order.ORDER_MKT:\n continue\n else:\n if fill.order.asset_code != self.underlying_code:\n vegaP = self._get_position_vega()\n if abs(vegaP) > 1: self.hedge_vega(fill)\n deltaP = self._get_portfolio_delta_actual()\n if abs(deltaP) > 10:\n self.rebalance_delta()\n self.potential_inventory = self.inventory\n\n for code in list(self.asset_codes.keys()):\n update = updates.get(code, 0)\n\n if not update or not update.bids or not update.asks:\n measured_price = self.asset_codes[code][\"price\"]\n init_spread = 0.5\n self.generate_limit_order(code, measured_price, self.asset_codes[code][\"vol\"], measured_price+init_spread/2, measured_price-init_spread/2, 0.02)\n else:\n spread = update.asks[0].price - update.bids[0].price\n imbalance = update.bids[0].size / (update.bids[0].size + update.asks[0].size)\n measured_price = self.get_measured_price(update.mid_market_price, imbalance, update.asks[0].price, update.bids[0].price)\n # measured_price = update.mid_market_price\n self.generate_limit_order(code, measured_price, self.asset_codes[code][\"vol\"], update.asks[0].price, update.bids[0].price, 0.02)\n\n # print(\"tick\",self.tick)\n if self.tick > 1:\n for order_id in self.ticks[self.tick-1]:\n if order_id == 0: continue\n cancellation = self.cancel_order(order_id)\n if cancellation.success != True:\n print(cancellation)\n else:\n pass\n # print(\"Success!\")\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Run the exchange client')\n parser.add_argument(\"--server_host\", type=str, default=\"localhost\")\n parser.add_argument(\"--server_port\", type=str, default=\"50052\")\n parser.add_argument(\"--client_id\", type=str)\n parser.add_argument(\"--client_private_key\", type=str)\n parser.add_argument(\"--websocket_port\", type=int, default=5678)\n \n args = parser.parse_args()\n host, port, client_id, client_pk, websocket_port = (args.server_host, args.server_port,\n args.client_id, args.client_private_key,\n args.websocket_port)\n \n client = NDMarketMaker(host, port, client_id, client_pk, websocket_port)\n client.start_updates()\n" }, { "alpha_fraction": 0.5925222039222717, "alphanum_fraction": 0.6116392016410828, "avg_line_length": 44.739131927490234, "blob_id": "efc885a29525e59f4c96feaa1efe9fda1897f885", "content_id": "3050a04535fec8f9d34fa900454f9eca6eabf3fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9468, "license_type": "no_license", "max_line_length": 185, "num_lines": 207, "path": "/case1/case1_strategy.py", "repo_name": "aramrashduni/mwtc-nd19", "src_encoding": "UTF-8", "text": "import argparse\nimport random\nimport numpy as np\nimport protos\nfrom sympy import *\n\nfrom client.exchange_service.client import BaseExchangeServerClient\nfrom protos.order_book_pb2 import Order\nfrom protos.service_pb2 import PlaceOrderResponse, CancelOrderResponse\nfrom protos.exchange_pb2 import MarketUpdate\n\ncurrent_position = {\"K\": 0, \"M\": 0, \"N\": 0, \"Q\": 0, \"U\": 0, \"V\": 0}\nquantity_to_order = {\"K\": 0, \"M\": 0, \"N\": 0, \"Q\": 0, \"U\": 0, \"V\": 
0}\nfilled_amounts = {\"K\": 0, \"M\": 0, \"N\": 0, \"Q\": 0, \"U\": 0, \"V\": 0}\nviolation_risk = {\"K\": 0, \"M\": 0, \"N\": 0, \"Q\": 0, \"U\": 0, \"V\": 0}\norder_id_tracking = {}\nnet_position_error = int(0)\nrunning_total = 0\niteration = 1\nrunning_averages = np.array([0,0,0,0,0,0])\n\ndef prc_deviation_calc(prc_vec, expected_prc_vec = np.array([100.4478, 100.9738, 101.1336, 101.1543, 101.2378, 101.6016])):\n # Pick out mid_market_price vector\n prc_vec_mid = prc_vec[:,2]\n \n \n # Generate 6x6 matrix of expected price differences among assets\n exp_prc_diffs = np.zeros([6,6])\n for iexp1 in np.arange(len(expected_prc_vec)):\n for iexp2 in np.arange(len(expected_prc_vec)):\n exp_prc_diffs[iexp1,iexp2] = expected_prc_vec[iexp2] - expected_prc_vec[iexp1]\n \n # Generate 6x6 matrix of actual price differences among assets\n act_prc_diffs = np.zeros([6,6])\n for iact1 in np.arange(len(prc_vec_mid)):\n for iact2 in np.arange(len(prc_vec_mid)):\n act_prc_diffs[iact1,iact2] = prc_vec_mid[iact2] - prc_vec_mid[iact1]\n \n # Calculate difference between elements in actual price differences and expected price differences\n mag_diff_arr = exp_prc_diffs - act_prc_diffs\n return mag_diff_arr\n \n\ndef alloc_calc(prc_abnormality_arr,net_position_error):\n print(\"EXECUTING ALLOC_CALC\")\n # Serialize price abnormalities into column vector (first extract upper triangle of data)\n prc_abnormality_arr = np.triu(prc_abnormality_arr)\n prc_abnormality_col = np.concatenate(prc_abnormality_arr)\n #g\n # Remove the zero entries from this column vector\n prc_abnormality_col = np.concatenate(np.array([prc_abnormality_col[1:6],prc_abnormality_col[8:12],prc_abnormality_col[15:18],prc_abnormality_col[22:24],prc_abnormality_col[29:30]]))\n # Generate coefficient matrix\n A = np.zeros([16,6])\n Arow = 0\n while(Arow<14):\n for ia1 in np.arange(6):\n for ia2 in np.arange(ia1+1,6):\n A[Arow,ia1] = 1\n A[Arow,ia2] = 1\n Arow += 1\n # Append the quantity restriction terms\n A[15,:]= np.array([1, 1, 1, 1, 1, 1])\n prc_abnormality_col = np.append(prc_abnormality_col,net_position_error*-1)\n #g\n # Transpose coefficient matrix for least squares approximation\n A_T = np.transpose(A)\n #g\n # Find both sides of least squares equation, and form matrix to row reduce\n left_side_matrix = np.dot(A_T, A)\n right_side_matrix = np.dot(A_T, prc_abnormality_col)\n total_matrix = np.c_[left_side_matrix,right_side_matrix]\n #g\n total_matrix_rref = Matrix(total_matrix).rref()\n return np.array([total_matrix_rref[0].col(-1)])\n \ndef calc_vector_distance(vec):\n running_total = 0\n for i in np.arange(len(vec)):\n running_total += vec[i,0]**2\n return np.sqrt(running_total)\n\ndef calc_running_averages(current_prices, averages, iteration):\n for i in range(0,6):\n averages[i] = (current_prices[i,2] + averages[i]*(iteration-1)) / iteration\n return averages\n \n\nclass ExampleMarketMaker(BaseExchangeServerClient):\n \"\"\"A simple market making bot - shows the basics of subscribing\n to market updates and sending orders\"\"\"\n\n def __init__(self, *args, **kwargs):\n BaseExchangeServerClient.__init__(self, *args, **kwargs)\n\n self._orderids = set([])\n \n def _make_order(self, asset_code, quantity, price_input):\n return Order(asset_code = asset_code, quantity=quantity,\n order_type = Order.ORDER_LMT,\n price = price_input,\n competitor_identifier = self._comp_id)\n \n def _make_MKT_order(self, asset_code, quantity):\n return Order(asset_code = asset_code, quantity=quantity,\n order_type = Order.ORDER_MKT,\n competitor_identifier = 
self._comp_id)\n \n def handle_exchange_update(self, exchange_update_response):\n global current_position, quantity_to_order, filled_amounts, violation_risk, order_id_tracking, net_position_error, running_total, iteration, running_averages\n \n # Get current price data\n current_prices = np.zeros([6,3])\n for i,update in enumerate(exchange_update_response.market_updates):\n current_prices[i,:] = np.array([update.bids[0].price, update.asks[0].price, update.mid_market_price])\n print(\"Current Prices:\")\n print(current_prices)\n \n \n # Implement pricing strategy \n running_averages = calc_running_averages(current_prices,running_averages,iteration)\n prc_abnormality_arr = prc_deviation_calc(current_prices,running_averages)\n weight_vec = 50*alloc_calc(prc_abnormality_arr,net_position_error)\n weight_int = weight_vec.astype(int)\n print(\"Weight Vector:\")\n print(weight_int)\n \n # Check for filled quantities and update current position\n for i, update in enumerate(exchange_update_response.fills):\n print(update.filled_quantity*order_id_tracking[update.order.order_id])\n filled_amounts[update.order.asset_code] = update.filled_quantity*order_id_tracking[update.order.order_id]\n current_position[update.order.asset_code] += update.filled_quantity*order_id_tracking[update.order.order_id]\n \n # Cancel Existing Orders\n current_outstanding_orders = self._orderids.copy()\n for iorder_id in current_outstanding_orders:\n cancel_resp = self.cancel_order(iorder_id)\n if(cancel_resp.success != True):\n print(\"Error Canceling Order\",iorder_id)\n self._orderids.remove(iorder_id)\n \n # Calculate the ideal quantity to order\n current_position_avg = sum(current_position.values()) / len(current_position.values())\n for i, asset_code in enumerate([\"K\", \"M\", \"N\", \"Q\", \"U\", \"V\"]):\n quantity_to_order[asset_code] = int((weight_int[0,i] - current_position[asset_code] - current_position_avg))\n violation_risk[asset_code] = current_position[asset_code] + quantity_to_order[asset_code]\n \n print(\"Quantity to Order:\")\n print(quantity_to_order.values())\n print(\"Violation Risk:\")\n print(violation_risk.values())\n print(sum(list(violation_risk.values())))\n print(\"Current Position:\")\n print(current_position)\n # Ordering Logic\n if(abs(sum(list(violation_risk.values())))<50):\n for i, asset_code in enumerate([\"K\", \"M\", \"N\", \"Q\", \"U\", \"V\"]):\n quantity = quantity_to_order[asset_code]\n running_total += abs(quantity)\n print(\"Quantity to Order:\", quantity)\n \n # Make Orders\n if(quantity > 0):\n #order_resp = self.place_order(self._make_order(asset_code, quantity, round(current_prices[i,0],2))) # limit order\n order_resp = self.place_order(self._make_MKT_order(asset_code, quantity))\n order_id_tracking[order_resp.order_id] = 1\n elif(quantity < 0):\n #order_resp = self.place_order(self._make_order(asset_code, quantity, round(current_prices[i,1],2))) # limit order\n order_resp = self.place_order(self._make_MKT_order(asset_code, quantity))\n order_id_tracking[order_resp.order_id] = -1\n else:\n order_resp = \"Zero Quantity Order\"\n \n # implement error checking\n if type(order_resp) != PlaceOrderResponse:\n print(order_resp)\n else:\n self._orderids.add(order_resp.order_id)\n \n # Calculate feedback\n print(\"Error Correction:\")\n print(list(current_position.values()))\n print(sum(list(current_position.values())))\n net_position_error = 0\n while(iteration<20): iteration += 1\n \n # Track PnL\n print(\"PnL:\",exchange_update_response.competitor_metadata.pnl)\n print(\"Running 
Total:\",running_total)\n print(\"\\n\\n\")\n\n\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Run the exchange client')\n parser.add_argument(\"--server_host\", type=str, default=\"localhost\")\n parser.add_argument(\"--server_port\", type=str, default=\"50052\")\n parser.add_argument(\"--client_id\", type=str)\n parser.add_argument(\"--client_private_key\", type=str)\n parser.add_argument(\"--websocket_port\", type=int, default=5678)\n\n args = parser.parse_args()\n host, port, client_id, client_pk, websocket_port = (args.server_host, args.server_port,\n args.client_id, args.client_private_key,\n args.websocket_port)\n\n client = ExampleMarketMaker(host, port, client_id, client_pk, websocket_port)\n client.start_updates()\n" } ]
6
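The `alloc_calc` routine in the record above solves its 16-equation, 6-unknown pairwise-spread system by forming the normal equations `(A^T A)x = A^T b` and row-reducing the augmented matrix with sympy's `Matrix.rref()`. A minimal sketch (illustrative only: the random `b` stands in for the price-abnormality vector) showing that, because this `A` has full column rank, `np.linalg.lstsq` reaches the same solution directly and avoids exact rational row reduction, which is slow inside a trading loop:

```python
# Sketch: the normal-equations + RREF pipeline of alloc_calc is equivalent
# to a direct least-squares solve when A has full column rank.
import numpy as np

rng = np.random.default_rng(0)

A = np.zeros((16, 6))
row = 0
for i in range(6):                 # one row per asset pair (15 rows total)
    for j in range(i + 1, 6):
        A[row, i] = 1.0
        A[row, j] = 1.0
        row += 1
A[15, :] = 1.0                     # net-position restriction row

b = rng.normal(size=16)            # stand-in for the abnormality column vector

x_normal = np.linalg.solve(A.T @ A, A.T @ b)        # normal equations
x_lstsq, *_ = np.linalg.lstsq(A, b, rcond=None)     # direct least squares

assert np.allclose(x_normal, x_lstsq)
```

Two smaller observations on `calc_running_averages`: it implements the standard incremental mean, but `running_averages` is initialised as `np.array([0,0,0,0,0,0])`, whose integer dtype truncates every in-place update to whole price units (`np.zeros(6)` would keep float precision); and because the handler pins `iteration` at 20 after the first update, later updates behave like a fixed-weight exponential moving average with weight 1/20 rather than a true running mean. A one-line sketch of the same update with a float accumulator (names are illustrative):

```python
def update_mean(prev_mean: float, new_value: float, n: int) -> float:
    """Incremental mean: avg_n = avg_{n-1} + (x_n - avg_{n-1}) / n."""
    return prev_mean + (new_value - prev_mean) / n
```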
rishibhutada/Take_Home_Test
https://github.com/rishibhutada/Take_Home_Test
9e27d400de3388f5cec34d4227481c1ee537d0dc
d1029f7cac65deb25785c0ea276689e66697c63c
bc57ceac844283835ad70d03bd9525d3436297a7
refs/heads/master
2022-12-26T03:16:46.216368
2020-09-30T15:14:48
2020-09-30T15:14:48
298,983,738
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6206790208816528, "alphanum_fraction": 0.6293209791183472, "avg_line_length": 42.98642349243164, "blob_id": "3d52f8ba0d2d261d318fda689aa06e198bbfcb61", "content_id": "20110117d53dfcacb56c8c5699b3adec7660d908", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9720, "license_type": "no_license", "max_line_length": 128, "num_lines": 221, "path": "/AvgDurationPerDifficultyLevel.py", "repo_name": "rishibhutada/Take_Home_Test", "src_encoding": "UTF-8", "text": "from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import udf, when\nfrom pyspark.sql import functions as F\nimport os, glob\nimport configparser\nimport util\n\n\ndef get_minutes(df_to_be_formatted,prep_time):\n '''\n Converting Prep Time in numerical format to perform mathematical operation over it\n :param prep_time:\n :return: Time\n '''\n #Checking for String length and deciding course of action to take as follows:\n #Length is 4 and M is last Character 'PT5M' -> 5\n #Length is 4 and H is last Character 'PT4H' -> 240\n #Length is 5 'PT25M' -> 25\n #Length is 6 'PT5H5M' -> 305 ((5*60) + 5)\n #Length is 7 'PT5H25M' -> 325 ((5*60) + 25\n #Other than that 'PT' -> 0\n df_to_be_formatted = df_to_be_formatted.withColumn(prep_time, F.when((F.length(F.col(prep_time)) == 4) &\n (F.substring(F.col(prep_time), 4, 1) == 'M'),\n (F.substring(F.col(prep_time), 3, 1)).cast('int'))\n .when((F.length(F.col(prep_time)) == 4) & (F.substring(F.col(prep_time), 4, 1) == 'H'),\n ((F.substring(F.col(prep_time), 3, 1)).cast('int')) * 60)\n .when((F.length(F.col(prep_time)) == 5), (F.substring(F.col(prep_time), 3, 2)).cast('int'))\n .when((F.length(F.col(prep_time)) == 6), (((F.substring(F.col(prep_time), 3, 1)).cast('int')) * 60) +\n ((F.substring(F.col(prep_time), 5, 1)).cast('int')))\n .when((F.length(F.col(prep_time)) == 7), (((F.substring(F.col(prep_time), 3, 1)).cast('int')) * 60) +\n ((F.substring(F.col(prep_time), 5, 2)).cast('int'))).otherwise(F.lit(0))\n )\n\n return df_to_be_formatted\n\n\n@udf\ndef duration_in_proper_format(difficulty, avg_total_cooking_time):\n '''\n Converting the Duration of each difficulty type to readable format\n :param difficulty:\n :param avg_total_cooking_time:\n :return: Dataframe with Formated duration\n '''\n #Check if difficulty is hard, then convert duration in Hours and minutes form\n if difficulty == 'hard':\n rounded_duration = int(round(avg_total_cooking_time))\n hours = int(rounded_duration / 60)\n minutes = rounded_duration % 60\n return f\"{hours} hours & {minutes} minutes\"\n else:\n #If difficulty is easy or medium, keep it in minutes\n rounded_duration = int(round(avg_total_cooking_time))\n return f\"{rounded_duration} minutes\"\n\n\ndef filename_change(output_path,output_file_name):\n '''\n Renames the output file to the required name\n :param output_path:\n '''\n os.chdir(output_path) #Going tto the folder where we need to change the name\n #Iterating through each file name to check if it has '.csv' as its extension\n for file in glob.glob(\"*.csv\"):\n filename = file\n break\n\n new_path = output_path + filename #Creating path of the file to be renamed\n rename_path = output_path + output_file_name #Creating the new name of the file with which we want to rename\n\n #This renames the file to our specified name\n os.rename(new_path, rename_path)\n\n\ndef get_recipes_involving_beef(input_reciepie_df):\n '''\n Filter out only those recipes with Beef in the ingredients\n :param input_reciepie_df:\n :return: 
only_beef_in_ingredients_df\n '''\n\n # Selecting only the columns which are required for computations and dropping others\n input_reciepie_df = input_reciepie_df.select(\"name\", \"ingredients\", \"prepTime\", \"cookTime\")\n\n # Changing the case of the string so that it becomes easier for comparision\n is_beef_in_ingredients_df = input_reciepie_df.withColumn(\"ingredients_to_upper\",\n F.upper(input_reciepie_df['ingredients']))\n\n # Checking if 'BEEF' is there in the ingredients\n is_beef_in_ingredients_df = is_beef_in_ingredients_df.withColumn(\"contains_beef\",\n F.col('ingredients_to_upper').contains('BEEF'))\n\n\n # Filtering records with BEEF in the ingredients and drop the columns which are not required\n only_beef_in_ingredients_df = is_beef_in_ingredients_df.filter(F.col('contains_beef') == 'true') \\\n .drop('ingredients', 'ingredients_to_upper','contains_beef')\n\n return only_beef_in_ingredients_df\n\n\ndef calculate_total_cooking_time(only_beef_in_ingredients_df):\n '''\n Calcultes total Cooking time by adding prep time and cook time for each recipe\n :param only_beef_in_ingredients_df:\n :return: total_cook_time_df\n '''\n\n # Getting Preperation Time in proper format so that we can perform mathematical operations over it\n prep_time_formated_df = get_minutes(only_beef_in_ingredients_df,'prepTime')\n\n # Getting Cooking Time in proper format so that we can perform mathematical operations over it\n cook_time_formatted_df = get_minutes(prep_time_formated_df,'cookTime')\n\n # Calculating total cooking time by adding Cook Time and Prep time\n total_cook_time_df = cook_time_formatted_df.withColumn('total_cook_time', (\n F.col('prepTime') + F.col('cookTime')).cast('int'))\n\n # Dropping columns which are not required\n total_cook_time_df = total_cook_time_df.drop(\"prepTime\", \"cookTime\", \"name\",'contains_beef')\n\n return total_cook_time_df\n\n\ndef calculate_average_cooking_time_per_difficulty_level(total_cook_time_df):\n '''\n Gives difficulty level according to their individual cooking time and\n Calclates average cooking time for every difficulty level\n :param total_cook_time_df:\n :return: avg_cooking_time_df\n '''\n # Classifying the recipes the basis of their difficulty levels\n difficulty_level_classified_df = total_cook_time_df.withColumn(\"difficulty\",\n when(F.col('total_cook_time') <= 30, 'easy')\n .when(F.col('total_cook_time') <= 60, 'medium')\n .otherwise('hard'))\n\n # Calculating average duration per difficulty level\n difficulty_level_classified_df = difficulty_level_classified_df.groupby('difficulty').mean()\n\n #REduce the partitions to one as we have only 3 rows\n difficulty_level_classified_df.coalesce(1)\n\n # Ordering as easy, medium and hard\n difficulty_level_classified_df = difficulty_level_classified_df.orderBy('avg(total_cook_time)')\n\n # Changing the duration in a proper readable and presentable format\n avg_cooking_time_df = difficulty_level_classified_df.withColumn(\"avg_total_cooking_time\",\n duration_in_proper_format(F.col('difficulty'),\n F.col('avg(total_cook_time)')))\n\n # dropping the column which is not required\n avg_cooking_time_df = avg_cooking_time_df.drop('avg(total_cook_time)')\n\n return avg_cooking_time_df\n\n\ndef main():\n global logger\n\n #Initializing Configs and Paths\n config = configparser.ConfigParser()\n config.read('config.ini')\n input_path_S3 = config.get('INPUT_PATH', 'input_path')\n output_path = config.get('OUTPUT_PATH', 'output_path')\n output_file_name = config.get('OUTPUT_PATH', 'output_file_name')\n 
log_path = config.get('LOGS', 'log_path')\n environment = config.get('ENVIRONMENT', 'environment')\n\n #Initializing Logger\n logger = util.init_logger('AvgDurationPerDifficultyLevel.py',log_path)\n\n #Initializing SparkSession\n if environment == 'local':\n spark = SparkSession.builder.master('local[*]').appName('HelloFreshAssigment').getOrCreate()\n else:\n spark = SparkSession.builder.appName('HelloFreshAssigment').getOrCreate()\n\n logger.info(\"Spark Session Initialised\")\n\n try:\n #Reading File From Specified Path\n input_recipe_df = spark.read.json(input_path_S3)\n logger.info(f\"File got read successfully from the loation {input_path_S3}\")\n except:\n logger.error(f\"There is some issue with the file input path: {input_path_S3}\")\n\n #Calling function to get records which only contain ingredient beef\n only_beef_in_ingredients_df = get_recipes_involving_beef(input_recipe_df)\n\n #Calculating Total cooking time for each recipe\n total_cook_time_df = calculate_total_cooking_time(only_beef_in_ingredients_df)\n\n #Calculating Average Cooking time per difficulty level\n avg_cooking_time_df = calculate_average_cooking_time_per_difficulty_level(total_cook_time_df)\n\n try:\n #Writing data to the specified output path and overwriting if it already exists\n avg_cooking_time_df.coalesce(1)\\\n .write.mode('overwrite')\\\n .format(\"csv\") \\\n .option(\"header\", \"true\") \\\n .save(output_path)\n logger.info(f\"File got written successfully at the loation {output_path}\")\n except:\n logger.error(f\"There is some issue with the file input path: {output_path}\")\n\n #Stopping the Spark Session so that resources are released\n spark.stop()\n\n #As the file written by spark has a name something like - 'part000-*.csv'\n #We need to change it to reports.csv as per the deliverable requirement\n try:\n filename_change(output_path,output_file_name)\n logger.info(\"Filname changed Successfully\")\n except:\n logger.error(f\"There is some issue with {output_path} or {output_file_name}\")\n\n\n#Calling the main()\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.7721893787384033, "alphanum_fraction": 0.7781065106391907, "avg_line_length": 21.600000381469727, "blob_id": "0eea781a782587e68e30863c888b69597e42da2b", "content_id": "fd0f0b60fe604b03492f5f460240a006c4e8c665", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 338, "license_type": "no_license", "max_line_length": 81, "num_lines": 15, "path": "/config.ini", "repo_name": "rishibhutada/Take_Home_Test", "src_encoding": "UTF-8", "text": "[INPUT_PATH]\ninput_path = C://Users/Rishi/Desktop/HelloFreshAssignment/HelloFresh_Recipes.json\n\n[LOGS]\nlog_path = C://Users/Rishi/Desktop/HelloFreshAssignment/logs/\n\n[OUTPUT_PATH]\noutput_path = C://Users/Rishi/Desktop/HelloFreshAssignment/output/\noutput_file_name =reports.csv\n\n[APP]\nnum_partitions = 25\n\n[ENVIRONMENT]\nenvironment = local" }, { "alpha_fraction": 0.6839694380760193, "alphanum_fraction": 0.6854962110519409, "avg_line_length": 26.29166603088379, "blob_id": "72e6755678021ea0e04b72870834c447eea80da2", "content_id": "1ba8700d052aaf5f13d2183348627a6cf2120d40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 655, "license_type": "no_license", "max_line_length": 70, "num_lines": 24, "path": "/util.py", "repo_name": "rishibhutada/Take_Home_Test", "src_encoding": "UTF-8", "text": "import logging\nimport datetime\n\n# import cal\nformatter = 
logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')\ncurrtime = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\nlogger = ''\n\n\ndef init_logger(job_name, log_path):\n global logger\n log_file = \"%s/%s_%s.log\" % (log_path, job_name, currtime)\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n logger = logging.getLogger(job_name)\n logger.setLevel(logging.INFO)\n logger.addHandler(handler)\n return logger\n\n\ndef close_logger(logger):\n if len(logger.handlers) > 0:\n for handler in logger.handlers:\n logger.removeHandler(handler)\n" }, { "alpha_fraction": 0.5947271585464478, "alphanum_fraction": 0.6036174297332764, "avg_line_length": 41.93421173095703, "blob_id": "52588e87c53943a9b224ffedaac98badc5bf07ca", "content_id": "1c1572655763dfb504e83bd2cd187ed4a4b4ab20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3262, "license_type": "no_license", "max_line_length": 106, "num_lines": 76, "path": "/test_AvgDurationPerDifficultyLevel.py", "repo_name": "rishibhutada/Take_Home_Test", "src_encoding": "UTF-8", "text": "import unittest\nfrom unittest import TestCase\nfrom AvgDurationPerDifficultyLevel import get_recipes_involving_beef, calculate_total_cooking_time, \\\n calculate_average_cooking_time_per_difficulty_level, duration_in_proper_format\nfrom pyspark.sql import SparkSession\nimport util\n\n# Initializing Configs and Paths\ninput_path = \"C://Users/Rishi/Desktop/HelloFreshAssignment/sample.json\"\nlog_path = \"C://Users/Rishi/Desktop/HelloFreshAssignment/logs/test_logs\"\n\n# Initializing Logger\nlogger = util.init_logger('test_AvgDurationPerDifficultyLevel.py', log_path)\n\n# Initializing SparkSession\nspark = SparkSession.builder.master('local[*]').appName('HelloFreshAssigment').getOrCreate()\n\nlogger.info(\"Spark Session Initialised\")\n\ntry:\n # Reading File From local sample Json\n input_reciepie_df = spark.read.json(input_path)\n logger.info(f\"File got read successfully from the location {input_path}\")\nexcept:\n logger.error(f\"There is some issue with the file input path: {input_path}\")\n\n# Calling functions to get respective values in different stages\nonly_beef_in_ingredients_df = get_recipes_involving_beef(input_reciepie_df)\ntotal_cook_time_df = calculate_total_cooking_time(only_beef_in_ingredients_df)\navg_cooking_time_df = calculate_average_cooking_time_per_difficulty_level(total_cook_time_df)\n\n\nclass Test(TestCase):\n # Calling functions to respective values in different stages\n\n def test_get_recipies_involving_beef(self):\n time = 'prepTime'\n expected_beef_occurences = spark.createDataFrame(data=[['abc', 'PT5M', 'PT2H2M'],\n ['def', 'PT20M', 'PT3H25M'],\n ['ijk', 'PT5H', 'PT']],\n schema=['name', 'prepTime', 'cookTime'])\n\n self.assertTrue(expected_beef_occurences, only_beef_in_ingredients_df)\n\n def test_calculate_total_cooking_time(self):\n expected_cooking_time = spark.createDataFrame(data=[[25],\n [200],\n [45]],\n schema=['total_cook_time'])\n\n self.assertTrue(expected_cooking_time, total_cook_time_df)\n\n def test_calculate_average_cooking_time_per_difficulty_level(self):\n expected_avg_cooking_time = spark.createDataFrame(data=[['easy', '25 minutes'],\n ['medium', '45 minutes'],\n ['hard', '3 hours & 20 minutes']],\n schema=['difficulty', 'avg_total_cooking_time'])\n\n self.assertTrue(expected_avg_cooking_time, total_cook_time_df)\n\n def test_duration_in_proper_format(self):\n expected_duration = '2 hours & 25 minutes'\n\n test_df = 
spark.createDataFrame(data=[['hard', 145]], schema=['difficulty', 'total_duration'])\n\n actual_duration = duration_in_proper_format(test_df['difficulty'], test_df['total_duration'])\n\n self.assertTrue(expected_duration, actual_duration)\n\n\n\n\n\n\nif __name__ == '__main__':\n unittest.main()" }, { "alpha_fraction": 0.7703324556350708, "alphanum_fraction": 0.7785166501998901, "avg_line_length": 52.318180084228516, "blob_id": "f4fd040cb5362efa515f844845d02d5e7df49e45", "content_id": "b5579fa4c26c25552efe4a4ed77f54fd4b0f88d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5865, "license_type": "no_license", "max_line_length": 251, "num_lines": 110, "path": "/README.md", "repo_name": "rishibhutada/Take_Home_Test", "src_encoding": "UTF-8", "text": "# Average Cooking Duration Per Difficulty Level\n\nWe ingest events from our Kafka Stream and store them in our DataLake on s3. \nEvents are sorted by arriving date. For example `events/recipe_changes/2019/11/29`.\nDuring events processing we heavily rely on execution day to make sure we pick proper chunk of data and keep historical results.\nWe use Apache Spark to work with data and store it on s3 in parquet format. Our primary programming language is Python.\n\n# Exercise\n## Overview\nWe are interested in tracking changes to see available recipes, their cooking time and difficulty level.\n\n## Task 1\nUsing Apache Spark and Python, read and pre-process rows to ensure further optimal structure and performance \nfor further processing. \n\n## Task 2\nUsing Apache Spark and Python read processed dataset from step 1 and: \n1. extract only recipes that have `beef` as one of the ingredients\n2. calculate average cooking time duration per difficulty level\n\nTotal cooking time duration can be calculated by formula:\n```bash\ntotal_cook_time = cookTime + prepTime\n``` \n\nCriteria for levels based on total cook time duration:\n- easy - less than 30 mins\n- medium - between 30 and 60 mins\n- hard - more than 60 mins.\n\n## Deliverables\n- A deployable Spark Application written in Python\n- a README file with brief explanation of approach, data exploration and assumptions/considerations. \nYou can use this file by adding new section or create a new one.\n- a CSV file with average cooking time per difficulty level. Please add it to `output` folder.\nFile should have 2 columns: `difficulty,avg_total_cooking_time` and named as `report.csv`\n\n## Requirements\n- Well structured, object-oriented, documented and maintainable code\n- Unit tests to test the different components\n- Errors handling\n- Documentation\n- Solution is deployable and we can run it\n\n## Bonus points\n- Config handling\n- Logging and alerting\n- Consider scaling of your application\n- CI/CD explained\n- Performance tuning explained\n- We love clean and maintainable code\n- We appreciate good combination of Software and Data Engineering\n\n# Solution\n\n## Data Exploration\n Data has following nine columns:\n* name\n* ingredients\n* url\n* image\n* cookTime\n* recipeYield\n* datePublished\n* prepTime\n* description\n\n- Out of the above mentioned columns only four columns namely **name, ingredients, prepTime and cookTime** are important to us. So we will drop other columns initially and select only the required columns\n- *prepTime* and *cookTime* are not in desired format, so we need to convert them using an UDF. eg. 
We need to convert a prep time given as 'PT50M' to 50 so that we can perform mathematical operations on it.\n- *ingredients* contains a long description of the different ingredients with their respective quantities, separated by '\\n'. We just need to check whether beef appears in the list of ingredients, and keep only the rows that pass this check.\n\n## Assumptions and Considerations\n- I developed and executed the code on my local machine, as I don't have access to a Spark cluster environment for personal work. The code is tested locally as well, but it is scalable and can adapt to higher requirements on the go.\n- If *prepTime* is 'PT', it is treated as zero.\n- Neither *prepTime* nor *cookTime* individually will be more than 10 hours\n- *prepTime* and *cookTime* won't be null\n- Kept the *name* column rather than dropping it, so that rows stay distinguishable in the initial stages\n- The final output should be in a readable format such as *2 hours and 10 minutes* rather than just *130*\n- We need to overwrite *reports.csv* on every run because the values in the *Recipe Repository* keep getting updated periodically\n- **0-30** minutes is *easy*\n- **31-60** minutes is *medium*\n- **61 minutes onwards** is *hard*\n- Rounded the averages of the respective difficulty levels to the nearest integer value\n\n## Explanation of Approach\n- Read the .json and stored it in a dataframe\n- First I dropped the columns that were not required and kept only those needed for the calculations\n- I then checked every row of the *ingredients* column for the presence of *beef*; if present, *true* is added to the *contains_beef* column, otherwise *false*\n- The next step was to filter the data down to the true values and again drop all the irrelevant columns\n- Then I converted *prepTime* and *cookTime* into a proper integer format using pyspark.sql functions\n- Then added *prepTime* and *cookTime* to get *total_cook_time* for each recipe\n- After this I assigned a difficulty level to each recipe according to its *total_cook_time*\n- Then I grouped by *difficulty* and calculated the average for each level\n- Then I made the final rearrangements, calling a UDF, so that the data is in a presentable format\n- Then I wrote it to the specified folder, using *coalesce(1)* so that only one file is written\n- I stop the Spark session so that the resources are released\n- As the file written by Spark has a different name than what we want, I use Python code to rename it to the desired name.\n\n## Performance Tuning\n- Dropped columns as soon as they were no longer required\n- Filtered the recipes down to only those with beef as an ingredient\n- Performed as many operations as possible with Spark built-in functions rather than UDFs\n- Stopped the Spark session as soon as the Spark-related tasks were done\n- Applied coalesce(1) as soon as only 3 rows remained\n- Took all paths and names from config.ini so they can be changed on the go as requirements change\n\n## CI/CD Explanation\n- Wrote unit test cases covering every function in the code (*test_AvgDurationPerDifficultyLevel.py*)\n- If the test cases fail on deployment, the build automatically fails\n- Tried to cover as much code as possible in unit tests to increase overall code coverage\n" } ]
5
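`get_minutes` in the record above decodes the `'PT5H25M'`-style duration strings by switching on string length, which only covers the exact shapes listed in its comments (single-digit hours, at most two-digit minutes). A self-contained sketch of a regex-based alternative, not part of the repo, that handles any digit counts while keeping the repo's convention that `'PT'` (or anything malformed) means zero:

```python
import re

_DURATION = re.compile(r"^PT(?:(\d+)H)?(?:(\d+)M)?$")

def iso_duration_to_minutes(value: str) -> int:
    """Total minutes for strings like 'PT5M', 'PT4H', 'PT5H25M', 'PT'."""
    match = _DURATION.match(value or "")
    if not match:
        return 0                  # malformed values -> 0, matching the repo's 'PT' rule
    hours, minutes = (int(g) if g else 0 for g in match.groups())
    return hours * 60 + minutes

assert iso_duration_to_minutes("PT") == 0
assert iso_duration_to_minutes("PT5M") == 5
assert iso_duration_to_minutes("PT4H") == 240
assert iso_duration_to_minutes("PT5H25M") == 325
assert iso_duration_to_minutes("PT10H30M") == 630   # a shape the length-switch misses
```

In Spark this could be registered with `F.udf(iso_duration_to_minutes, IntegerType())`, at the cost of giving up the built-in-functions-only optimisation the README calls out.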
Alex-Philp/Sandbox
https://github.com/Alex-Philp/Sandbox
c5c48e2868a58c28b07c600fea1d34e81bcd543d
e22d3dd6076f86e9f37df51784502b51574965e3
8c5edbdae927e96cf440cfe29a2b76f6c1843a58
refs/heads/master
2020-07-08T04:55:51.798891
2019-08-21T11:48:40
2019-08-21T11:48:40
203,571,197
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 23, "blob_id": "087075750984ebb472956eff1a9c01c004f137aa", "content_id": "f99a7e9e875c95e2676216d078d72ceaa01e2baa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 48, "license_type": "no_license", "max_line_length": 37, "num_lines": 2, "path": "/README.md", "repo_name": "Alex-Philp/Sandbox", "src_encoding": "UTF-8", "text": "# Sandbox\nproject for miscellaneous minor tests\n" }, { "alpha_fraction": 0.5962733030319214, "alphanum_fraction": 0.6024844646453857, "avg_line_length": 21.928571701049805, "blob_id": "f7169392f5da73dff615bffd0fa4877b805b327b", "content_id": "40395dda977810bb23eb4ef4b68ffa788affe7aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 322, "license_type": "no_license", "max_line_length": 53, "num_lines": 14, "path": "/password_entry.py", "repo_name": "Alex-Philp/Sandbox", "src_encoding": "UTF-8", "text": "MIN_LENGTH = 5\nis_valid = False\n\npassword = input(\"Please enter a password: \")\n\nwhile not is_valid:\n if len(password) > MIN_LENGTH:\n is_valid = True\n else:\n print(\"Password is not valid\")\n password = input(\"Please enter a password: \")\n\nfor i in range(0, len(password)):\n print('*', end=' ')\n\n" } ]
2
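A small, assumed refactor of the `password_entry.py` snippet above (not the repo's code): keeping the prompt inside a single loop removes the duplicated `input()` call, and the masking print collapses to one expression.

```python
MIN_LENGTH = 5

def get_valid_password() -> str:
    """Prompt until the password is longer than MIN_LENGTH characters."""
    while True:
        password = input("Please enter a password: ")
        if len(password) > MIN_LENGTH:
            return password
        print("Password is not valid")

if __name__ == "__main__":
    # one '*' per character, space-separated, as in the original loop
    print(" ".join("*" * len(get_valid_password())))
```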
theparadoxer02/koinex
https://github.com/theparadoxer02/koinex
32f8d622b4eeba5d73c77c0a1ac62cb5b6995e11
70c76c9f199e60ba4d96cdc66e030e5a92b77d56
204828aad5c3b5c4c8417a2bc011dbe031b911a4
refs/heads/master
2021-08-19T16:18:36.582328
2017-11-26T21:57:34
2017-11-26T21:57:34
112,112,790
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8207547068595886, "alphanum_fraction": 0.8207547068595886, "avg_line_length": 29.428571701049805, "blob_id": "7eeebb98ae055df38879bafde4bb6c53f086f6fb", "content_id": "55401df6e2f067bcafc04851bd73236a311a1454", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 58, "num_lines": 7, "path": "/accounts/admin.py", "repo_name": "theparadoxer02/koinex", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Account, KYC, KYC_Document, BankDetail\n\nadmin.site.register(Account)\nadmin.site.register(KYC)\nadmin.site.register(KYC_Document)\nadmin.site.register(BankDetail)" }, { "alpha_fraction": 0.6193029284477234, "alphanum_fraction": 0.6193029284477234, "avg_line_length": 16.230770111083984, "blob_id": "5696c8e853d1712a2d05a6cc9e844b425c392e8d", "content_id": "55dfb03c7c624cb3b697f055501cddcf1ad53f17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1119, "license_type": "no_license", "max_line_length": 66, "num_lines": 65, "path": "/accounts/api/serializers.py", "repo_name": "theparadoxer02/koinex", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom accounts.models import Account, KYC, KYC_Document, BankDetail\n\nclass AccountSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Account\n\t\tfields = [\n\t\t\t'status_option',\n\t\t\t'user',\n\t\t\t'first_name',\n\t\t\t'last_name',\n\t\t\t'email',\n\t\t\t'asknbid_id',\n\t\t\t'account_status',\n\t\t\t'accont_created_on',\n\t\t]\n\n\nclass KYCSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = KYC\n\t\tfields = [\n\t\t\t\t'kyc_status',\n\t\t\t\t'Account',\n\t\t\t\t'dob',\n\t\t\t\t'full_name',\n\t\t\t\t'pan_number',\n\t\t\t\t'adhaar_no',\n\t\t\t\t'gross_annual_income',\n\t\t\t\t'residential_status',\n\t\t\t\t'street_address',\n\t\t\t\t'city',\n\t\t\t\t'state',\n\t\t\t\t'country',\n\t\t\t\t'pin_code',\n\t\t\t\t'kyc_status',\n\t\t\t\t'valid'\n\t\t\t]\n\nclass KYC_DocumentSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = KYC_Document\n\t\tfields = [\n\t\t\t'Account',\n\t\t\t'pan_card',\n\t\t\t'adhaar_card',\n\t\t\t'adhaar_back',\n\t\t\t'photograph',\n\t\t\t'valid',\n\t\t]\n\n\nclass BankDetailSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = BankDetail\n\t\tfields = [\n\t\t\t'Account',\n\t\t\t'ifsc_code',\n\t\t\t'time',\n\t\t\t'source',\n\t\t\t'ip',\n\t\t\t'activity',\n\t\t\t'status',\n\t\t\t'valid'\n\t\t]" }, { "alpha_fraction": 0.5597800016403198, "alphanum_fraction": 0.5734098553657532, "avg_line_length": 52.61538314819336, "blob_id": "6f8dd4be53ccde109ce2b06d9cf8c2b3daaf97b2", "content_id": "d630bda2c63820fdd3a495147eb9ba5daea70f42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4182, "license_type": "no_license", "max_line_length": 158, "num_lines": 78, "path": "/accounts/migrations/0001_initial.py", "repo_name": "theparadoxer02/koinex", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.3 on 2017-11-26 20:58\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n 
migrations.CreateModel(\n name='Account',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('first_name', models.CharField(default='', max_length=40)),\n ('last_name', models.CharField(default='', max_length=40)),\n ('email', models.EmailField(max_length=254)),\n ('asknbid_id', models.CharField(max_length=30, null=True, unique=True)),\n ('account_status', models.CharField(choices=[('Submitted', 'Submitted'), ('Verified', 'Verified'), ('Rejected', 'Rejected')], max_length=20)),\n ('accont_created_on', models.DateField(auto_now=True)),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='BankDetail',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('ifsc_code', models.CharField(default='', max_length=10)),\n ('time', models.TimeField(auto_now_add=True)),\n ('source', models.CharField(default='', max_length=100)),\n ('ip', models.GenericIPAddressField()),\n ('activity', models.TextField()),\n ('status', models.BooleanField(default=False)),\n ('valid', models.BooleanField(default=False)),\n ('Account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Account')),\n ],\n ),\n migrations.CreateModel(\n name='KYC',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('dob', models.DateField()),\n ('full_name', models.CharField(max_length=100)),\n ('pan_number', models.CharField(default='', max_length=10)),\n ('adhaar_no', models.CharField(default='', max_length=12)),\n ('gross_annual_income', models.CharField(default='', max_length=12)),\n ('residential_status', models.CharField(default='', max_length=200)),\n ('street_address', models.CharField(default='', max_length=30)),\n ('city', models.CharField(default='', max_length=30)),\n ('state', models.CharField(default='', max_length=30)),\n ('country', models.CharField(default='', max_length=30)),\n ('pin_code', models.CharField(default='', max_length=10)),\n ('kyc_status', models.CharField(choices=[('Submitted', 'Submitted'), ('Pending', 'Pending'), ('Verified', 'Verified')], max_length=30)),\n ('valid', models.BooleanField(default=False)),\n ('Account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Account')),\n ],\n ),\n migrations.CreateModel(\n name='KYC_Document',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('pan_card', models.ImageField(upload_to='media/upload/pancard')),\n ('adhaar_card', models.ImageField(upload_to='media/upload/adhaarcard')),\n ('adhaar_back', models.ImageField(upload_to='media/upload/adhaarback')),\n ('photograph', models.ImageField(upload_to='media/upload/photograph')),\n ('valid', models.BooleanField(default=False)),\n ('Account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Account')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.8380829095840454, "alphanum_fraction": 0.8380829095840454, "avg_line_length": 26.60714340209961, "blob_id": "302b31471f513277023f37f68d9856d9f67ba7eb", "content_id": "29193ae1e891e1e60133eea0ee8501cac77b178f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 772, "license_type": "no_license", "max_line_length": 56, "num_lines": 28, "path": "/accounts/api/views.py", "repo_name": 
"theparadoxer02/koinex", "src_encoding": "UTF-8", "text": "from rest_framework import generics\nfrom rest_framework import permissions\nfrom . serializers import (\n\tBankDetailSerializer,\n\tKYCSerializer,\n\tKYC_DocumentSerializer,\n\tAccountSerializer,\n)\n\n\nclass BankDetailCreateApiView(generics.CreateAPIView):\n\tserializer_class = BankDetailSerializer\n\tpermission_classes = [permissions.IsAuthenticated]\n\n\nclass KYC_DocumentCreateApiView(generics.CreateAPIView):\n\tserializer_class = KYC_DocumentSerializer\n\tpermission_classes = [permissions.IsAuthenticated]\n\n\nclass KYCCreateApiView(generics.CreateAPIView):\n\tserializer_class = KYCSerializer\n\tpermission_classes = [permissions.IsAuthenticated]\n\n\nclass AccountCreateApiView(generics.CreateAPIView):\n\tserializer_class = AccountSerializer\n\tpermission_classes = [permissions.IsAuthenticated]" }, { "alpha_fraction": 0.7381974458694458, "alphanum_fraction": 0.7381974458694458, "avg_line_length": 28.1875, "blob_id": "3346b4e0c2ca4d23d424fc6ccbcd269e37992b5d", "content_id": "7dbef12c20c3513b4b1bd37085010b030931db0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 466, "license_type": "no_license", "max_line_length": 81, "num_lines": 16, "path": "/accounts/api/urls.py", "repo_name": "theparadoxer02/koinex", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\n\nfrom accounts.api.views import (\n\tBankDetailCreateApiView,\n\tKYCCreateApiView,\n\tKYC_DocumentCreateApiView,\n\tAccountCreateApiView\n\t)\n\nurlpatterns = [\n\turl(r'^accounts/$', AccountCreateApiView.as_view(), name='accounts'),\n\turl(r'^bank/$', BankDetailCreateApiView.as_view(), name='bankdetail'),\n\turl(r'^kyc/$', KYCCreateApiView.as_view(), name='kyc'),\n\turl(r'^kycdocument/$', KYC_DocumentCreateApiView.as_view(), name='kycdocument'),\n\n]" }, { "alpha_fraction": 0.7178261876106262, "alphanum_fraction": 0.7297700643539429, "avg_line_length": 35.41304397583008, "blob_id": "cf305fcf5ac59903f0c5b0c5d1e72c5410efb0d7", "content_id": "f62ca3186c733146e5580f0700ce3fc876d7d366", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3349, "license_type": "no_license", "max_line_length": 72, "num_lines": 92, "path": "/accounts/models.py", "repo_name": "theparadoxer02/koinex", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django import forms\n\n\nclass Account(models.Model):\n\t\" Accounts \"\n\tstatus_option = (\n\t\t('Submitted', 'Submitted'),\n\t\t('Verified', 'Verified',),\n\t\t('Rejected', 'Rejected'),\n\t)\n\tuser = models.OneToOneField(User, on_delete=models.CASCADE)\n\tfirst_name = models.CharField(max_length=40, default='', null=False)\n\tlast_name = models.CharField(max_length=40, default='', null=False)\n\temail = models.EmailField(null=False, max_length=254)\n\tasknbid_id = models.CharField(max_length=30, unique=True, null=True)\n\taccount_status = models.CharField(max_length=20, choices=status_option)\n\taccont_created_on = models.DateField(auto_now=True, auto_now_add=False)\n\tvalid = models.BooleanField(default=False)\n\n\tdef __str__(self):\n\t\treturn self.first_name\n\nclass KYC(models.Model):\n\tkyc_status = (\n\t\t('Submitted', 'Submitted'),\n\t\t('Pending','Pending'),\n\t\t('Verified','Verified'),\n\t)\n\tAccount = models.ForeignKey(Account, on_delete=models.CASCADE)\n\tdob = models.DateField(auto_now=False, auto_now_add=False)\n\tfull_name = 
models.CharField(max_length=100, null=False)\n\tpan_number = models.CharField(max_length=10, default='')\n\tadhaar_no = models.CharField(max_length=12, default='')\n\tgross_annual_income = models.CharField(max_length=12, default='')\n\tresidential_status = models.CharField(max_length=200, default='')\n\tstreet_address = models.CharField(max_length=30, default='')\n\tcity = models.CharField(max_length=30, default='')\n\tstate = models.CharField(max_length=30, default='')\n\tcountry = models.CharField(max_length=30, default='')\n\tpin_code = models.CharField(max_length=10, default='')\n\tkyc_status = models.CharField(max_length=30, choices=kyc_status)\n\tvalid = models.BooleanField(default=False)\n\n\tdef __str__(self):\n\t\treturn self.full_name\n\n\tdef save(self, *args, **kwargs):\n\t\tif self.Account.valid is False:\n\t\t\traise forms.ValidationError('The User is not Verified yet')\n\t\telse:\n\t\t\tsuper(KYC, self).save(*args, **kwargs)\n\n\nclass KYC_Document(models.Model):\n\tAccount = models.ForeignKey(Account, on_delete=models.CASCADE)\n\tpan_card = models.ImageField(upload_to='media/upload/pancard')\n\tadhaar_card = models.ImageField(upload_to='media/upload/adhaarcard')\n\tadhaar_back = models.ImageField(upload_to='media/upload/adhaarback')\n\tphotograph = models.ImageField(upload_to='media/upload/photograph')\n\tvalid = models.BooleanField(default=False)\n\n\tdef __str__(self):\n\t\treturn str(self.Account)\n\n\tdef save(self, *args, **kwargs):\n\t\tif self.Account.valid is False:\n\t\t\traise forms.ValidationError('The User is not Verified yet')\n\t\telse:\n\t\t\tsuper(KYC_Document, self).save(*args, **kwargs)\n\n\nclass BankDetail(models.Model):\n\tAccount = models.ForeignKey(Account, on_delete=models.CASCADE)\n\tifsc_code = models.CharField(max_length=10, default='', null=False)\n\ttime = models.TimeField(auto_now=False, auto_now_add=True)\n\t# Not sure about it\n\tsource = models.CharField(max_length=100, default='')\n\tip = models.GenericIPAddressField()\n\tactivity = models.TextField()\n\tstatus = models.BooleanField(default=False)\n\tvalid = models.BooleanField(default=False)\n\n\tdef __str__(self):\n\t\treturn str(self.Account)\n\n\tdef save(self, *args, **kwargs):\n\t\tif self.Account.valid is False:\n\t\t\traise forms.ValidationError('The User is not Verified yet')\n\t\telse:\n\t\t\tsuper(BankDetail, self).save(*args, **kwargs)" } ]
6
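All three models in `accounts/models.py` above repeat the same save-time guard, and the per-class `super(<Class>, self)` calls are easy to copy-paste wrong. A hedged sketch, not the repo's code, of the usual Django shape for this: share the guard through an abstract base model and the `clean()` hook, so no subclass names itself in `super()`. It assumes, as the models above do, a foreign key named `Account` whose target has a boolean `valid` field.

```python
from django.core.exceptions import ValidationError
from django.db import models


class RequiresVerifiedAccount(models.Model):
    """Abstract base for models that may only be saved for verified accounts."""

    class Meta:
        abstract = True

    def clean(self):
        # The same guard the three save() overrides duplicate above
        if not self.Account.valid:
            raise ValidationError('The User is not Verified yet')

    def save(self, *args, **kwargs):
        self.clean()                   # run only the guard; full_clean() would also validate every field
        super().save(*args, **kwargs)  # zero-argument super avoids naming the wrong class
```

`KYC`, `KYC_Document`, and `BankDetail` would then subclass `RequiresVerifiedAccount` instead of `models.Model` and drop their individual `save()` overrides; raising `django.core.exceptions.ValidationError` also frees the models module from importing `django.forms`.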
scanfyu/datacode
https://github.com/scanfyu/datacode
17d26c5278dbdce2cff61404fe46cd34b364a75c
ddf4371f92925584f8c993eed70647a4a6f6c898
aeb5105f929c47db138654f1f9f139609f7e878e
refs/heads/master
2021-01-22T06:06:48.478231
2017-03-06T16:57:03
2017-03-06T16:57:03
81,733,600
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6630963683128357, "alphanum_fraction": 0.6694254875183105, "avg_line_length": 37.03703689575195, "blob_id": "7f5d601083dedac9bb5ba76a6ae2132012a5cf03", "content_id": "56ea2536afeb1b299cb5dfac00c01041c2d6f4a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2082, "license_type": "no_license", "max_line_length": 135, "num_lines": 54, "path": "/csv.py", "repo_name": "scanfyu/datacode", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n'''\nimport csv\nimport xlrd\n\nimport unicodecsv \n\ndef read_csv(filename): #iterators\n with open(filename, 'rb') as f: #Rb means opened for reading; b flag changee format of how the files is read.\n reader = unicodecsv.DictReader(f) #DictReader means each row would be a dictionary. if data has a header row.\n return list(reader) #with: indent all the code_file automatically closed.\n #将迭代器转换为列表的简单方法 list(reader)\n\nenrollments = read_csv('enrollments.csv') #create a list and input data\ndaily_engagement = read_csv('daily_engagement.csv')\nproject_submissions = read_csv('project_submissions.csv')\n'''\n\n\nimport unicodecsv\n\nenrollments_filename = '/datasets/ud170/udacity-students/enrollments.csv'\n\n## Longer version of code (replaced with shorter, equivalent version below)\n\n# enrollments = []\n# f = open(enrollments_filename, 'rb')\n# reader = unicodecsv.DictReader(f)\n# for row in reader:\n# enrollments.append(row)\n# f.close()\n\ndef openfile(filename):\n with open(filename, 'rb') as f:\n reader = unicodecsv.DictReader(f)\n return list(reader)\n \n### Write code similar to the above to load the engagement\n### and submission data. The data is stored in files with\n### the given filenames. Then print the first row of each\n### table to make sure that your code works. You can use the\n### \"Test Run\" button to see the output of your code.\n\nengagement_filename = '/datasets/ud170/udacity-students/daily_engagement.csv'\nsubmissions_filename = '/datasets/ud170/udacity-students/project_submissions.csv'\n\nenrollments = openfile(enrollments_filename) \ndaily_engagement = openfile(engagement_filename) # Replace this with your code\nproject_submissions = openfile(submissions_filename) # Replace this with your code\n\nprint (enrollments[0])\nprint (daily_engagement[0])\nprint (project_submissions[0])\n" }, { "alpha_fraction": 0.694505512714386, "alphanum_fraction": 0.6996337175369263, "avg_line_length": 34.02564239501953, "blob_id": "c22339dd29fd0cdeef00e8608709e484ea5c608e", "content_id": "4a2109676940fbd579acffc888e82fb0ed2a2d47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1365, "license_type": "no_license", "max_line_length": 80, "num_lines": 39, "path": "/retype.py", "repo_name": "scanfyu/datacode", "src_encoding": "UTF-8", "text": "import unicodecsv\n\ndef read_csv(filename):\n with open(filename, 'rb') as f:\n reader = unicodecsv.DictReader(f)\n return list(reader)\n \nenrollments = read_csv('F:\\\\data\\\\DAND\\\\csv\\\\enrollments.csv')\ndaily_engagement = read_csv('F:\\\\data\\\\DAND\\\\csv\\\\daily_engagement.csv')\nproject_submissions = read_csv('F:\\\\data\\\\DAND\\\\csv\\\\project_submissions.csv')\n\nprint (enrollments[0])\nprint (daily_engagement[0])\nprint (project_submissions[0])\n \n### For each of these three tables, find the number of rows in the table and\n### the number of unique students in the table. 
To find the number of unique\n### students, you might want to create a set of the account keys in each table.\n\ndef account_num(files):\n num = set()\n for file in files:\n num.add(file['account_key'])\n return len(num)\n\n\nenrollment_num_rows = len(enrollments) # Replace this with your code\n\nenrollment_num_unique_students = set() # Replace this with your code\nfor enrollment in enrollments:\n enrollment_num_unique_students.add(enrollment['account_key'])\nlen(enrollment_num_unique_students)\n\n\nengagement_num_rows = 0 # Replace this with your code\nengagement_num_unique_students = 0 # Replace this with your code\n\nsubmission_num_rows = 0 # Replace this with your code\nsubmission_num_unique_students = 0 # Replace this with your code" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 11.5, "blob_id": "f8f4732e863521e000f42e4472f03881555233ec", "content_id": "89fe2d72db6fca1705bd9ace12b9d1e1f91a5c51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 25, "license_type": "no_license", "max_line_length": 13, "num_lines": 2, "path": "/README.md", "repo_name": "scanfyu/datacode", "src_encoding": "UTF-8", "text": "# datacode\nmy data code.\n" }, { "alpha_fraction": 0.5832759737968445, "alphanum_fraction": 0.6042670607566833, "avg_line_length": 25.66972541809082, "blob_id": "4d7894dfdb1062af55a5be686d892b47831af516", "content_id": "5cec0a30b0cef53402ff498e6a683dc6c68c7387", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3002, "license_type": "no_license", "max_line_length": 216, "num_lines": 109, "path": "/parsecsv.py", "repo_name": "scanfyu/datacode", "src_encoding": "UTF-8", "text": "'''\nimport os\nimport csv #csv处理模块\nimport xlrd #处理xls & xlsx文件\nimport xlwt #创建xls文件\n\nDATADIR = \"\"\nDATAFILE = \"F:\\\\data\\\\beatles-diskography.csv\"\n\n\ndef parse_file(datafile):\n data = []\n with open(datafile, \"rb\") as f:\n header = f.readline().split(\",\") #读取头标区,传入19行\n counter = 0\n for line in f:\n if counter == 10:\n break\n \n fields = line.split(\",\") #使用,以切分数据\n entry = {}\n \n for i, value in enumerate(fields): #使用枚举遍历并存入索引、数据\n entry[header[i].strip()] = value.strip() #strip()清理空白区域\n \n data.append(entry)\n counter += 1\n\n return data\n\n\ndef test():\n # a simple test of your implemetation\n datafile = os.path.join(DATADIR, DATAFILE)\n d = parse_file(datafile)\n firstline = {'Title': 'Please Please Me', 'UK Chart Position': '1', 'Label': 'Parlophone(UK)', 'Released': '22 March 1963', 'US Chart Position': '-', 'RIAA Certification': 'Platinum', 'BPI Certification': 'Gold'}\n tenthline = {'Title': '', 'UK Chart Position': '1', 'Label': 'Parlophone(UK)', 'Released': '10 July 1964', 'US Chart Position': '-', 'RIAA Certification': '', 'BPI Certification': 'Gold'}\n\n assert d[0] == firstline\n assert d[9] == tenthline\n\n \ntest()\n'''\n\n\n#!/usr/bin/env python\n\"\"\"\nYour task is as follows:\n- read the provided Excel file\n- find and return the min, max and average values for the COAST region\n- find and return the time value for the min and max entries\n- the time values should be returned as Python tuples\n\nPlease see the test function for the expected return format\n\"\"\"\n\nimport xlrd\nfrom zipfile import ZipFile\ndatafile = \"2013_ERCOT_Hourly_Load_Data.xls\"\n\n\ndef open_zip(datafile):\n with ZipFile('{0}.zip'.format(datafile), 'r') as myzip:\n myzip.extractall()\n\n\ndef parse_file(datafile):\n workbook 
= xlrd.open_workbook(datafile)\n sheet = workbook.sheet_by_index(0)\n\n data = [[sheet.cell_value(r, col) for col in range(sheet.ncols)] for r in range(sheet.nrows)]\n\n cv = sheet.col_values(1, start_rowx=1, end_rowx=None)\n\n maxval = max(cv)\n minval = min(cv)\n\n maxpos = cv.index(maxval) + 1\n minpos = cv.index(minval) + 1\n\n maxtime = sheet.cell_value(maxpos, 0)\n realtime = xlrd.xldate_as_tuple(maxtime, 0)\n mintime = sheet.cell_value(minpos, 0)\n realmintime = xlrd.xldate_as_tuple(mintime, 0)\n\n data = {\n 'maxtime': realtime,\n 'maxvalue': maxval,\n 'mintime': realmintime,\n 'minvalue': minval,\n 'avgcoast': sum(cv) / float(len(cv))\n }\n return data\n\ndata = parse_file(datafile)\nimport pprint\npprint.pprint(data)\n\n\ndef test():\n open_zip(datafile)\n data = parse_file(datafile)\n\n assert data['maxtime'] == (2013, 8, 13, 17, 0, 0)\n assert round(data['maxvalue'], 10) == round(18779.02551, 10)\n\n\ntest()" } ]
4
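`retype.py` in the record above leaves the engagement and submission counts as `0  # Replace this with your code` placeholders, while the enrollment block already shows the intended set-of-keys pattern (the unused `account_num` helper sketches the same idea). A short illustrative completion under that pattern; names follow the repo's own variables, and the `key` parameter exists because, in the original Udacity dataset, the engagement table names its id column `'acct'` rather than `'account_key'`:

```python
def unique_students(rows, key='account_key'):
    """Distinct student ids in a list-of-dicts table."""
    return {row[key] for row in rows}

enrollment_num_rows = len(enrollments)
enrollment_num_unique_students = len(unique_students(enrollments))

engagement_num_rows = len(daily_engagement)
engagement_num_unique_students = len(unique_students(daily_engagement))  # key='acct' for the raw Udacity file

submission_num_rows = len(project_submissions)
submission_num_unique_students = len(unique_students(project_submissions))
```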
narayanaditya95/amy
https://github.com/narayanaditya95/amy
326161e14b969cc369f8b3c5fe737d6d21977a88
8a62ada8ec9939c6989fe292d3203f1f80ed702f
a33a31fc863d44638ed09a1c71e4756266737891
refs/heads/master
2020-12-13T17:23:06.014898
2016-10-09T06:32:07
2016-10-09T06:32:07
31,808,047
1
0
null
2015-03-07T10:01:18
2015-03-05T23:25:52
2015-03-05T23:25:51
null
[ { "alpha_fraction": 0.5489935874938965, "alphanum_fraction": 0.5531961917877197, "avg_line_length": 31.525178909301758, "blob_id": "5dc57c90d553f38c89076e03d96978396369ffe6", "content_id": "da287a2c1f17cb64ce994fc406cc63994b15f7ef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4521, "license_type": "permissive", "max_line_length": 89, "num_lines": 139, "path": "/pydata/api.py", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "from functools import lru_cache\nfrom json import JSONDecodeError\nfrom urllib.parse import urljoin, urlparse\n\nimport requests\nfrom django.conf import settings\n\nfrom workshops.models import (\n Person,\n Role,\n Organization,\n Sponsorship,\n Task,\n)\nfrom workshops.util import create_username\n\n\nclass BaseAPIClient(requests.Session):\n \"\"\"\n An API client that abstracts away the work of dealing with URLs.\n Usage:\n > client = APIClient(event)\n > list(client) -> returns a list of all objects returned by the API.\n > client[23] -> returns the object with pk=23\n \"\"\"\n ROOT_ENDPOINT = 'api/'\n\n @lru_cache(maxsize=None)\n def __new__(cls, event):\n \"\"\"\n Returns an instance of APIClient.\n Throws NotImplementedError if an API does not exist at the root URL.\n \"\"\"\n try:\n r = requests.get(urljoin(event.url, cls.ROOT_ENDPOINT))\n r.raise_for_status()\n r.json()\n except (requests.exceptions.HTTPError, JSONDecodeError):\n raise NotImplementedError('Conference site does not support an API')\n return super().__new__(cls)\n\n def __init__(self, event):\n '''Populate API endpoint and set up basic authentication'''\n super().__init__()\n self.event = event\n self.endpoint = urljoin(event.url, self.ENDPOINT)\n self.auth = (\n settings.PYDATA_USERNAME_SECRET, settings.PYDATA_PASSWORD_SECRET)\n\n def __iter__(self):\n try:\n r = self.get(self.endpoint)\n r.raise_for_status()\n pydata_objs = r.json()\n except (requests.exceptions.HTTPError, JSONDecodeError) as e:\n raise IOError('Cannot fetch instances from API: {}'.format(str(e)))\n for obj in pydata_objs:\n yield self.parse(obj)\n\n def __contains__(self, pk):\n try:\n self.get(self.endpoint + str(pk)).raise_for_status()\n except requests.exceptions.HTTPError:\n return False\n else:\n return True\n\n def __getitem__(self, pk):\n if pk not in self:\n raise KeyError(\n '{} does not exist'.format(self.model._meta.verbose_name)\n )\n obj = self.get(self.endpoint + str(pk)).json()\n return self.parse(obj)\n\n\nclass PersonAPIClient(BaseAPIClient):\n ENDPOINT = 'api/speaker/'\n model = Person\n\n def parse(self, speaker):\n speaker['name'] = speaker['name'].strip()\n personal = speaker['name'].rsplit(' ', 1)[0]\n family = speaker['name'].rsplit(' ', 1)[-1]\n return Person(\n username=speaker['username'],\n personal=personal,\n family=family,\n email=speaker['email'],\n url=speaker['absolute_url'],\n )\n\n\nclass TaskAPIClient(BaseAPIClient):\n ENDPOINT = 'api/presentation/'\n model = Task\n\n def parse(self, presentation):\n return Task(\n event=self.event,\n person=Person.objects.get_or_create(\n email=presentation['speaker']['email'],\n defaults={\n 'username': create_username('', presentation['speaker']['username']),\n 'personal': presentation['speaker']['name'].rsplit(' ', 1)[0],\n 'family': presentation['speaker']['name'].rsplit(' ', 1)[-1],\n 'url': presentation['speaker']['absolute_url'],\n }\n )[0],\n role=Role.objects.get(name='presenter'),\n title=presentation['title'],\n url=presentation['absolute_url'],\n 
)\n\n\nclass SponsorshipAPIClient(BaseAPIClient):\n ENDPOINT = 'api/sponsor/'\n model = Sponsorship\n\n def parse(self, sponsor):\n return Sponsorship(\n organization=Organization.objects.get_or_create(\n domain=urlparse(sponsor['external_url']).netloc,\n defaults={\n 'fullname': sponsor['name'],\n 'notes': sponsor['annotation'],\n },\n )[0],\n event=self.event,\n amount=sponsor['level']['cost'],\n contact=Person.objects.get_or_create(\n email=sponsor['contact_email'],\n defaults={\n 'username': create_username('', sponsor['contact_name']),\n 'personal': sponsor['contact_name'].rsplit(' ', 1)[0],\n 'family': sponsor['contact_name'].rsplit(' ', 1)[-1],\n },\n )[0],\n )\n" }, { "alpha_fraction": 0.5950577855110168, "alphanum_fraction": 0.6030290722846985, "avg_line_length": 38.203125, "blob_id": "a8ec8c2d1549d23fcbc17d1cffc227778f0b8127", "content_id": "198873fc47daf75fe69b1c3c67e0d137e7499abb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2509, "license_type": "permissive", "max_line_length": 78, "num_lines": 64, "path": "/workshops/test/test_badge.py", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "from django.core.urlresolvers import reverse\n\nfrom .base import TestBase\nfrom ..models import Badge, Award, Person, Event\n\n\nclass TestBadge(TestBase):\n \"\"\"Tests for badge model and views, including some tests for awards.\"\"\"\n\n def setUp(self):\n super().setUp()\n self._setUpUsersAndLogin()\n\n def test_badge_display(self):\n \"\"\"Ensure the badge is displayed correctly on its details page.\"\"\"\n rv = self.client.get(reverse('badge_details',\n args=(self.swc_instructor.name, )))\n content = rv.content.decode('utf-8')\n assert self.swc_instructor.name in content\n assert self.swc_instructor.title in content\n assert self.swc_instructor.criteria in content\n self._check_status_code_and_parse(rv, 200)\n\n def test_badge_display_awards(self):\n \"Ensure awards are displayed correctly on their badge details page.\"\n rv = self.client.get(reverse('badge_details',\n args=(self.swc_instructor.name, )))\n content = rv.content.decode('utf-8')\n\n awards = self.swc_instructor.award_set.all()\n for award in awards:\n assert award.person.get_full_name() in content, \\\n \"Award for {} not found\".format(award.person)\n\n def test_badge_award(self):\n \"\"\"Ensure we can add awards from badge_award page.\"\"\"\n url, values = self._get_initial_form('badge_award',\n self.swc_instructor.name)\n values['person_1'] = self.spiderman.id\n\n # to override django-selectable behavior\n values['person_0'] = ''\n values['event_1'] = ''\n values['event_0'] = ''\n values['awarded_by_0'] = ''\n values['awarded_by_1'] = ''\n\n assert self.swc_instructor.award_set.count() == 3\n\n response = self.client.post(url, values)\n self.assertEqual(response.status_code, 302)\n\n assert self.swc_instructor.award_set.count() == 4\n\n def test_remove_award(self):\n \"Remove a badge from someone (ie. remove corresponding Award object).\"\n person = self.hermione\n award = person.award_set.all()[0]\n badge = award.badge\n # test first URL\n rv = self.client.post(reverse('award_delete', args=[award.pk, ]))\n assert rv.status_code == 302\n assert award not in badge.award_set.all() # award really removed\n assert badge not in person.badges.all() # badge not avail. 
via Awards\n" }, { "alpha_fraction": 0.5444332957267761, "alphanum_fraction": 0.5508886575698853, "avg_line_length": 38.366336822509766, "blob_id": "522ddb172912610c9c2e02bd64bdc367abffcb01", "content_id": "49f485098ad82350b2b57e1e21b8266c672e0732", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11928, "license_type": "permissive", "max_line_length": 83, "num_lines": 303, "path": "/workshops/management/commands/fake_database.py", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "from datetime import timedelta, datetime\nimport itertools\nimport random\n\nfrom django.core.management.base import BaseCommand\nfrom django_countries import countries as Countries\nfrom faker import Faker\n\nfrom workshops.models import (\n Airport,\n Role,\n Tag,\n Badge,\n Lesson,\n Person,\n Award,\n Qualification,\n Organization,\n Event,\n Task,\n)\n\n\nclass Command(BaseCommand):\n help = 'Add fake data to the database.'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--seed', action='store', default=None,\n help='Provide an initial seed for randomization mechanism.',\n )\n\n def fake_airports(self, faker, count=5):\n \"\"\"Add some airports.\"\"\"\n # we're not doing anything here, since:\n # 1. data migrations add some airports already\n # 2. we'll have more airports as fixtures as of #626\n pass\n\n def fake_roles(self, faker):\n \"\"\"Provide fixed roles (before they end up in fixtures, see #626).\"\"\"\n roles = [\n ('organizer', 'Workshop organizer'),\n ('learner', 'Learner'),\n ('host', 'Workshop host'),\n ('instructor', 'Instructor'),\n ('helper', 'Helper'),\n ]\n for name, verbose_name in roles:\n Role.objects.create(name=name, verbose_name=verbose_name)\n\n def fake_tags(self, faker):\n \"\"\"Provide fixed tags (before they end up in fixtures, see #626).\"\"\"\n tags = [\n ('SWC', 'Software Carpentry Workshop'),\n ('DC', 'Data Carpentry Workshop'),\n ('LC', 'Library Carpentry Workshop'),\n ('WiSE', 'Women in Science and Engineering'),\n ('TTT', 'Train the Trainers'),\n ]\n for tag, details in tags:\n Tag.objects.create(name=tag, details=details)\n\n def fake_badges(self, faker):\n \"\"\"Provide fixed badges (before they end up in fixtures, see #626).\"\"\"\n # 4 badges are already in the migrations: swc-instructor,\n # dc-instructor, maintainer, and trainer\n badges = [\n ('creator', 'Creator',\n 'Creating learning materials and other content'),\n ('member', 'Member', 'Software Carpentry Foundation member'),\n ('organizer', 'Organizer',\n 'Organizing workshops and learning groups'),\n ]\n for name, title, criteria in badges:\n Badge.objects.create(name=name, title=title, criteria=criteria)\n\n def fake_instructors(self, faker, count=5, add_badge=True,\n add_qualifications=True):\n \"\"\"Add a few people with random instructor badge, random airport, and\n random qualification.\"\"\"\n airports = list(Airport.objects.all())\n badges = list(Badge.objects.instructor_badges())\n lessons = list(Lesson.objects.all())\n for i in range(count):\n user_name = faker.user_name()\n emails = [faker.email(), faker.safe_email(), faker.free_email(),\n faker.company_email()]\n gender = random.choice(Person.GENDER_CHOICES)[0]\n if gender == 'F':\n name = faker.first_name_female()\n last_name = faker.last_name_female()\n elif gender == 'M':\n name = faker.first_name_male()\n last_name = faker.last_name_male()\n else:\n name, last_name = faker.first_name(), faker.last_name()\n\n person = Person.objects.create(\n 
personal=name,\n family=last_name,\n email=random.choice(emails),\n gender=gender,\n may_contact=random.choice([True, False]),\n airport=random.choice(airports),\n github=user_name,\n twitter=user_name,\n url=faker.url(),\n username=user_name,\n )\n\n if add_badge:\n Award.objects.create(\n person=person, badge=random.choice(badges),\n awarded=faker.date_time_this_year(before_now=True,\n after_now=False).date(),\n )\n if add_qualifications:\n for lesson in random.sample(lessons, 4):\n Qualification.objects.create(person=person, lesson=lesson)\n\n def fake_noninstructors(self, faker, count=5):\n \"\"\"Add a few people who aren't instructors.\"\"\"\n return self.fake_instructors(\n faker=faker, count=count, add_badge=False,\n add_qualifications=False,\n )\n\n def fake_organizations(self, faker, count=5):\n \"\"\"Add some organizations that host events.\"\"\"\n countries = list(Countries)\n for i in range(count):\n Organization.objects.create(\n domain=faker.domain_name(),\n fullname=faker.company(),\n country=random.choice(countries)[0],\n )\n\n def fake_current_events(self, faker, count=5):\n \"\"\"Ongoing and upcoming events.\"\"\"\n twodays = timedelta(days=2)\n organizations = list(Organization.objects.exclude(domain='self-organized'))\n countries = list(Countries)\n\n for i in range(count):\n start = faker.date_time_this_year(before_now=False,\n after_now=True).date()\n Event.objects.create(\n slug='{:%Y-%m-%d}-{}'.format(start, faker.slug()),\n start=start,\n end=start + twodays,\n url=faker.url(),\n host=random.choice(organizations),\n # needed in order for event to be published\n country=random.choice(countries)[0],\n venue=faker.word().title(),\n address=faker.sentence(nb_words=4, variable_nb_words=True),\n latitude=random.uniform(-90, 90),\n longitude=random.uniform(0, 180),\n )\n\n def fake_uninvoiced_events(self, faker, count=5):\n \"\"\"Preferably in the past, and with 'uninvoiced' status.\"\"\"\n twodays = timedelta(days=2)\n countries = list(Countries)\n organizations = list(Organization.objects.exclude(domain='self-organized'))\n\n for i in range(count):\n start = faker.date_time_this_year(before_now=True,\n after_now=False).date()\n Event.objects.create(\n slug='{:%Y-%m-%d}-{}'.format(start, faker.slug()),\n start=start,\n end=start + twodays,\n url=faker.url(),\n host=random.choice(organizations),\n # needed in order for event to be published\n country=random.choice(countries)[0],\n venue=faker.word().title(),\n address=faker.sentence(nb_words=4, variable_nb_words=True),\n latitude=random.uniform(-90, 90),\n longitude=random.uniform(0, 180),\n # needed in order for event to be uninvoiced\n invoice_status='not-invoiced',\n )\n\n def fake_unpublished_events(self, faker, count=5):\n \"\"\"Events with missing location data (which is required for publishing\n them).\"\"\"\n twodays = timedelta(days=2)\n organizations = list(Organization.objects.exclude(domain='self-organized'))\n\n for i in range(count):\n start = faker.date_time_this_year(before_now=True,\n after_now=True).date()\n Event.objects.create(\n slug='{:%Y-%m-%d}-{}'.format(start, faker.slug()),\n start=start,\n end=start + twodays,\n url=faker.url(),\n host=random.choice(organizations),\n )\n\n def fake_self_organized_events(self, faker, count=5):\n \"\"\"Full-blown events with 'self-organized' host.\"\"\"\n twodays = timedelta(days=2)\n self_organized = Organization.objects.get(domain='self-organized')\n countries = list(Countries)\n invoice_statuses = Event.INVOICED_CHOICES\n\n for i in range(count):\n start = 
faker.date_time_this_year(before_now=True,\n after_now=True).date()\n Event.objects.create(\n slug='{:%Y-%m-%d}-{}'.format(start, faker.slug()),\n start=start,\n end=start + twodays,\n url=faker.url(),\n host=self_organized,\n # needed in order for event to be published\n country=random.choice(countries)[0],\n venue=faker.word().title(),\n address=faker.sentence(nb_words=4, variable_nb_words=True),\n latitude=random.uniform(-90, 90),\n longitude=random.uniform(0, 180),\n # needed in order for event to be uninvoiced\n invoice_status=random.choice(invoice_statuses)[0],\n )\n\n def fake_tasks(self, faker, count=50):\n events = Event.objects.all()\n persons = Person.objects.all()\n roles = Role.objects.all()\n all_possible = itertools.product(events, persons, roles)\n\n for event, person, role in random.sample(list(all_possible), count):\n Task.objects.create(\n event=event,\n person=person,\n role=role,\n title=faker.sentence(nb_words=4, variable_nb_words=True),\n url=faker.url(),\n )\n\n def fake_trainees(self, faker):\n host = Organization.objects.all()[0]\n tag = Tag.objects.get(name='TTT')\n learner = Role.objects.get(name='learner')\n swc_instructor = Badge.objects.get(name='swc-instructor')\n dc_instructor = Badge.objects.get(name='dc-instructor')\n\n # events\n first_event = Event.objects.create(host=host, slug='first-ttt-event')\n first_event.tags.add(tag)\n first_event.save()\n\n second_event = Event.objects.create(host=host, slug='second-ttt-event')\n second_event.tags.add(tag)\n second_event.save()\n\n third_event = Event.objects.create(host=host, slug='third-ttt-event')\n third_event.tags.add(tag)\n third_event.save()\n\n # persons\n bob = Person.objects.create(username='bob-trainee',\n personal='Bob', family='Smith')\n alice = Person.objects.create(username='alice-trainee',\n personal='Alice', family='Smith')\n john = Person.objects.create(username='john-trainee',\n personal='Alice', family='Smith')\n\n # tasks\n Task.objects.create(event=first_event, person=bob, role=learner)\n Task.objects.create(event=first_event, person=alice, role=learner)\n Task.objects.create(event=second_event, person=john, role=learner)\n\n # awards\n Award.objects.create(event=first_event, person=bob, badge=swc_instructor,\n awarded=datetime(2016, 4, 1))\n Award.objects.create(event=third_event, person=bob, badge=dc_instructor,\n awarded=datetime(2016, 4, 1))\n\n def handle(self, *args, **options):\n faker = Faker()\n\n seed = options['seed']\n if seed is not None:\n faker.seed(seed)\n\n self.fake_airports(faker)\n self.fake_roles(faker)\n self.fake_tags(faker)\n self.fake_badges(faker)\n self.fake_instructors(faker)\n self.fake_noninstructors(faker)\n self.fake_organizations(faker)\n self.fake_current_events(faker)\n self.fake_uninvoiced_events(faker)\n self.fake_unpublished_events(faker)\n self.fake_self_organized_events(faker)\n self.fake_tasks(faker)\n self.fake_trainees(faker)\n" }, { "alpha_fraction": 0.5853304266929626, "alphanum_fraction": 0.5882353186607361, "avg_line_length": 39.5, "blob_id": "7f46d99775a574b0172a19b150b87a4e2f8dde63", "content_id": "f530b557f8190cbab1006478719d53373c7feef4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1377, "license_type": "permissive", "max_line_length": 93, "num_lines": 34, "path": "/workshops/management/commands/instructors_last_call.py", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "import sys\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom 
django.core.management.base import BaseCommand, CommandError\nfrom workshops.models import Badge, Event, Person, Role, Tag, Task\n\nclass Command(BaseCommand):\n help = 'Create mail addresses for events about to go stale: command line is event slugs.'\n\n def add_arguments(self, parser):\n parser.add_argument(\n 'event', help='Event slug',\n )\n\n def handle(self, *args, **options):\n\n # Every learner who was at this event but isn't an instructor.\n try:\n slug = options['event']\n event = Event.objects.get(slug=slug)\n learner = Role.objects.get(name='learner')\n instructor_badges = Badge.objects.instructor_badges()\n except ObjectDoesNotExist as e:\n self.stderr.write(str(e))\n sys.exit(1)\n\n # Report.\n trainees = Person.objects\\\n .filter(task__event=event, task__role=learner)\\\n .exclude(badges__in=instructor_badges)\\\n .distinct()\\\n .order_by('family', 'personal', 'email')\n people = trainees.values_list('family', 'personal', 'email') \n for (family, personal, email) in people:\n self.stdout.write('{0} {1} <{2}>'.format(personal, family, email))\n" }, { "alpha_fraction": 0.5305505394935608, "alphanum_fraction": 0.5344827771186829, "avg_line_length": 33.4375, "blob_id": "11dd0fbae6b2ab22fccf0ed8f6b2a258db562a11", "content_id": "f2331ef9e60da8957a7d6482568b91266affd9d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3306, "license_type": "permissive", "max_line_length": 70, "num_lines": 96, "path": "/workshops/test/test_diff.py", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "from django.core.urlresolvers import reverse\nfrom reversion.revisions import get_for_object, create_revision\n\nfrom workshops.models import Event, Person, Tag\n\nfrom .base import TestBase\n\n\nclass TestRevisions(TestBase):\n def setUp(self):\n self._setUpUsersAndLogin()\n self._setUpOrganizations()\n self.tag1, _ = Tag.objects.get_or_create(pk=1)\n self.tag2, _ = Tag.objects.get_or_create(pk=2)\n\n with create_revision():\n self.event = Event.objects.create(host=self.org_alpha,\n slug='event')\n self.event.tags.add(self.tag1)\n self.event.save()\n\n with create_revision():\n self.event.slug = 'better-event'\n self.event.host = self.org_beta\n self.event.tags.add(self.tag2)\n self.event.save()\n\n # load versions\n versions = get_for_object(self.event)\n assert len(versions) == 2\n self.newer, self.older = versions\n\n def test_showing_diff_event(self):\n # get newer revision page\n rv = self.client.get(reverse('object_changes',\n args=[self.newer.revision.pk]))\n # test returned context\n context = rv.context\n assert context['previous_version'] == self.older\n assert context['current_version'] == self.newer\n assert context['revision'] == self.newer.revision\n assert context['object'] == self.event\n assert 'object_prev' in context\n assert context['object_prev'].__class__ == Event\n\n\n def test_diff_shows_coloured_labels(self):\n # get newer revision page\n rv = self.client.get(reverse('object_changes',\n args=[self.newer.revision.pk]))\n # Red label for removed host\n self.assertContains(rv,\n '<a class=\"label label-danger\" href=\"{}\">-{}</a>'.format(\n self.org_alpha.get_absolute_url(),\n self.org_alpha\n ),\n html=True\n )\n # Green label for assigned host\n self.assertContains(rv,\n '<a class=\"label label-success\" href=\"{}\">+{}</a>'.format(\n self.org_beta.get_absolute_url(),\n self.org_beta\n ),\n html=True\n )\n # Grey label for pre-assigned tag\n self.assertContains(rv,\n '<a class=\"label 
label-default\" href=\"#\">{}</a>'.format(\n self.tag1\n ),\n html=True\n )\n # Green label for additionally assigned tag\n self.assertContains(rv,\n '<a class=\"label label-success\" href=\"#\">+{}</a>'.format(\n self.tag2\n ),\n html=True\n )\n\n def test_diff_shows_PK_for_deleted_relationships(self):\n # Delete the tag\n self.tag1.delete()\n self.tag2.delete()\n # get newer revision page\n rv = self.client.get(reverse('object_changes',\n args=[self.newer.revision.pk]))\n self.assertContains(rv,\n '<a class=\"label label-default\" href=\"#\">1</a>',\n html=True\n )\n self.assertContains(rv,\n '<a class=\"label label-success\" href=\"#\">+2</a>',\n html=True\n )\n" }, { "alpha_fraction": 0.5195646286010742, "alphanum_fraction": 0.5355455279350281, "avg_line_length": 42.398616790771484, "blob_id": "666983ab02ce878733818130604ccb98feefc4b2", "content_id": "519d1aad33187e14b76a3bc081a1b7b575d7f8eb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18835, "license_type": "permissive", "max_line_length": 144, "num_lines": 434, "path": "/workshops/management/commands/import_training_progress.py", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "import csv\nimport json\nfrom datetime import datetime\nfrom django.db.models import Q\n\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom workshops.models import Event, Person, TrainingProgress, \\\n TrainingRequirement, Role, Award, Badge\n\nEXPECTED_HEADER = [\n 'Discussion Session', # column A\n 'SWC Pull Request', # column B\n 'DC Exercise', # column C\n 'Personal', # column D\n 'Family', # column E\n 'Email', # column F\n 'Session', # column G\n 'DC', # column H\n 'SWC', # column I\n 'Completion Date', # column J\n 'Completion Flag', # column K\n]\n\nTRAINING_TO_INSTRUCTOR = {\n '2015-10-15-ttt-online': 'Greg Wilson',\n '2015-12-07-eu-instructor-training Paris': 'Stephen Crouch',\n '2015-12-07-eu-instructor-training Potsdam': 'Stephen Crouch',\n '2015-12-07-eu-instructor-training Thessaloniki': 'Stephen Crouch',\n '2015-12-07-na-instructor-training Arlington': 'Christina Koch',\n '2015-12-07-na-instructor-training Vancouver': 'Christina Koch',\n '2015-12-07-na-instructor-training Wisconsin': 'Christina Koch',\n '2015-12-07-ttt-Au': 'Aleksandra Pawlik',\n '2015-12-07-ttt-toronto Curitiba': 'Greg Wilson',\n '2015-12-07-ttt-toronto Toronto': 'Greg Wilson',\n '2016-01-05-ok-instructor-training': 'Christina Koch',\n '2016-01-13-instructor-training-lausanne': 'Aleksandra Pawlik',\n '2016-01-18-brisbane-instructor-training': 'Aleksandra Pawlik',\n '2016-01-21-melbourne-instructor-training': 'Aleksandra Pawlik',\n '2016-01-21-training-florida': 'Greg Wilson',\n '2016-01-28-auckland-instructor-training': 'Aleksandra Pawlik',\n '2016-02-16-training-online': 'Greg Wilson',\n '2016-02-22-training-ucdavis': 'Greg Wilson',\n '2016-03-09-ttt-uw': 'Ariel Rokem',\n '2016-04-13-training-online': 'Greg Wilson',\n '2016-04-17-instructor-training-nwu': 'Aleksandra Pawlik',\n '2016-05-04-instructor-training-ssi': 'Aleksandra Pawlik',\n '2016-05-11-ttt-compute-canada': 'Greg Wilson',\n '2016-05-18-ttt-online': 'Greg Wilson',\n '2016-06-08-ttt-online': 'Greg Wilson',\n '2016-06-08-ttt-arizona': 'Greg Wilson',\n '2016-06-30-Oslo-ttt': 'Christina Koch',\n '2016-07-11-ttt-scipy': 'Ariel Rokem',\n}\n\nNOTES = 'This record was automatically generated by import script.'\n\nclass RowError(Exception):\n pass\n\n\nclass Command(BaseCommand):\n help = 'Import training 
progress from a CSV file.'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.errors_occurred = False\n self.row_no = '?'\n self.personal, self.family, self.email = '?', '?', '?'\n\n def add_arguments(self, parser):\n parser.add_argument(\n 'filename', help='CSV file',\n )\n parser.add_argument(\n 'trainee2examiner', help='JSON file with map from trainees\\' usernames to examiner full names',\n )\n parser.add_argument(\n '--no-dry-run', action='store_true', dest='no-dry-run',\n default=False,\n )\n\n def warning(self, msg):\n full_msg = 'W Row {:3} {:15} {:20} {:40}: '.format(\n self.row_no + 2, self.personal, self.family, self.email)\n full_msg += msg + '\\n'\n self.stdout.write(full_msg)\n\n def error(self, msg):\n full_msg = 'E Row {:3} {:15} {:20} {:40}: '.format(\n self.row_no + 2, self.personal, self.family, self.email)\n full_msg += msg + '\\n'\n self.stderr.write(full_msg)\n self.errors_occurred = True\n raise RowError\n\n def assert_(self, var_name, flag):\n if not flag:\n self.error('Invalid value of {} variable.'.format(var_name))\n\n def handle(self, *args, **options):\n trainee2examiner_filename = options['trainee2examiner']\n trainee2swc_examiner, trainee2dc_examiner = \\\n self.load_trainee_to_examiner_map(trainee2examiner_filename)\n\n filename = options['filename']\n\n dry_run = not options['no-dry-run']\n\n with open(filename) as s:\n reader = csv.reader(s)\n\n # Check header (first row with column names)\n header = reader.__next__()\n self.validate_header(header)\n\n # Pull Requirement and Badge records\n training_requirement = TrainingRequirement.objects.get(name='Training')\n swc_homework = TrainingRequirement.objects.get(name='SWC Homework')\n dc_homework = TrainingRequirement.objects.get(name='DC Homework')\n discussion = TrainingRequirement.objects.get(name='Discussion')\n swc_demo = TrainingRequirement.objects.get(name='SWC Demo')\n dc_demo = TrainingRequirement.objects.get(name='DC Demo')\n\n swc_instructor_badge = Badge.objects.get(name='swc-instructor')\n dc_instructor_badge = Badge.objects.get(name='dc-instructor')\n\n # A list of TrainingProgress records that will be bulk added at\n # the end.\n progresses = []\n\n for self.row_no, row in enumerate(reader):\n try:\n # Reset state\n self.personal, self.family, self.email = '?', '?', '?'\n\n # Check number of columns\n if len(row) != len(header):\n self.error('Invalid row. 
Different number of columns')\n\n # Unpack column values and make initial preprocessing\n discussion_session = row[0].strip() # column A: \"\" or \"* withdrawn\" or \"2016-02-17 Ivan Gonzalez\"\n swc_pull_request = row[1].strip() # column B: \"\" or link to SWC homework\n dc_exercise = row[2].strip() # column C: \"\" or \"DC submitted\"\n self.personal = row[3].strip() # column D: always present\n self.family = row[4].strip() # column E: always present\n self.email = row[5].strip().lower() # column F: always present\n session = row[6].strip() # column G: \"2015-10-15-ttt-online\"\n dc = row[7].strip().lower() # column H: \"\" or \"SQL\" or \"R\" or \"Python\"\n swc = row[8].strip().lower() # column I: \"\" or \"Git\" or \"Shell\" or \"Python\" or \"R\"\n raw_completion_date = row[9].strip() # column J: \"\" or \"2016-03-22\"\n\n # Validate row\n self.assert_('swc_pull_request', swc_pull_request == '' or 'http' in swc_pull_request)\n self.assert_('dc_exercise', dc_exercise in ('', 'DC submitted') or 'http' in dc_exercise)\n self.assert_('self.personal', self.personal)\n self.assert_('self.family', self.family)\n self.assert_('self.email', self.email)\n self.assert_('self.email', '@' in self.email)\n self.assert_('dc', dc in ('', 'sql', 'r', 'python', 'openrefine', 'spreadsheets'))\n self.assert_('swc', swc in ('', 'r', 'python', 'shell', 'git', 'testing'))\n\n # Preprocess columns\n discussion_date, mentor, withdrawn = self.preprocess_discussion_session_column(discussion_session)\n slug, training = self.preprocess_training_name(session)\n completion_date = self.preprocess_completion_date(raw_completion_date)\n\n # Find instructor, trainee account and examiner\n instructor = self.find_instructor(session)\n trainee = self.find_trainee(self.email, self.family, self.personal)\n swc_examiner_from_JSON = self.find_examiner(trainee2swc_examiner, trainee.username)\n dc_examiner_from_JSON = self.find_examiner(trainee2dc_examiner, trainee.username)\n\n # Find out whether the trainee is certified and by whom\n swc_awarded, dc_awarded, swc_examiner_from_award, dc_examiner_from_award = \\\n self.check_awards(\n completion_date, dc, dc_exercise, dc_instructor_badge,\n swc, swc_instructor_badge, swc_pull_request, trainee)\n\n # Find out examiner based on Badge.awarded_by or data from JSON file\n if swc_awarded and swc_examiner_from_JSON is None and swc_examiner_from_award is None:\n self.error('Missing trainee in JSON (swc)')\n if swc_awarded and swc_examiner_from_JSON and swc_examiner_from_award and swc_examiner_from_JSON != swc_examiner_from_award:\n self.error('Ambiguous swc examiner')\n if dc_awarded and dc_examiner_from_JSON is None and dc_examiner_from_award is None:\n self.error('Missing trainee in JSON (dc)')\n if dc_awarded and dc_examiner_from_JSON and dc_examiner_from_award and dc_examiner_from_JSON != dc_examiner_from_award:\n self.error('Ambiguous dc examiner')\n swc_examiner = swc_examiner_from_JSON or swc_examiner_from_award\n dc_examiner = dc_examiner_from_JSON or dc_examiner_from_award\n\n except RowError:\n pass\n\n else:\n # Create Training progress record\n progresses.append(TrainingProgress(\n trainee=trainee,\n requirement=training_requirement,\n evaluated_by=instructor,\n event=training,\n discarded=withdrawn,\n notes=NOTES,\n ))\n\n # Create SWC Homework progress record\n if swc_pull_request:\n progresses.append(TrainingProgress(\n trainee=trainee,\n requirement=swc_homework,\n evaluated_by=None,\n url=swc_pull_request,\n discarded=withdrawn,\n notes=NOTES,\n ))\n\n # Create DC 
Homework progress record\n if dc_exercise:\n if dc_exercise == 'DC submitted':\n url = 'http://datacarpentry.org'\n else:\n assert 'http' in dc_exercise\n url = dc_exercise\n progresses.append(TrainingProgress(\n trainee=trainee,\n requirement=dc_homework,\n evaluated_by=None,\n url=url,\n discarded=withdrawn,\n notes=NOTES,\n ))\n\n # Create Discussion Session progress record\n if discussion_date:\n progresses.append(TrainingProgress(\n trainee=trainee,\n requirement=discussion,\n evaluated_by=mentor,\n created_at=discussion_date,\n discarded=withdrawn,\n notes=NOTES,\n ))\n\n # Create Demo Session progress record\n if swc_awarded:\n progresses.append(TrainingProgress(\n trainee=trainee,\n requirement=swc_demo,\n evaluated_by=swc_examiner,\n discarded=withdrawn,\n notes=NOTES,\n ))\n\n if dc_awarded:\n progresses.append(TrainingProgress(\n trainee=trainee,\n requirement=dc_demo,\n evaluated_by=dc_examiner,\n discarded=withdrawn,\n notes=NOTES,\n ))\n\n if dry_run:\n self.stderr.write('Database wasn\\'t altered because this is dry run. Use --no-dry-run parameter to import data.\\n')\n elif self.errors_occurred:\n self.stderr.write('Database wasn\\'t altered because of errors.\\n')\n else:\n self.stdout.write('All data was read from the CSV file. Starting importing data into AMY database.\\n')\n TrainingProgress.objects.bulk_create(progresses)\n self.stdout.write('Success. :-)\\n')\n\n def load_trainee_to_examiner_map(self, filename):\n with open(filename) as s:\n data = json.load(s)\n return data['swc-instructor'], data['dc-instructor']\n\n def validate_header(self, header):\n if header != EXPECTED_HEADER:\n self.stdout.write('Invalid header.\\n')\n self.stdout.write('Expected: {}\\n'.format(EXPECTED_HEADER))\n self.stdout.write('Got: {}\\n'.format(header))\n raise CommandError('Invalid header')\n\n def preprocess_discussion_session_column(self, discussion_session):\n '''Preprocess value from column A.'''\n\n if discussion_session == '':\n discussion_date = None\n mentor = None\n withdrawn = False\n elif discussion_session == '* withdrawn':\n discussion_date = None\n mentor = None\n withdrawn = True\n else: # i.e. 
\"2016-02-17 Ivan Gonzalez\"\n withdrawn = False\n try:\n raw_date, mentor_personal, mentor_family = discussion_session.split(' ')\n discussion_date = datetime.strptime(raw_date, '%Y-%m-%d')\n except Exception:\n self.error('Invalid column A value: {}'.format(discussion_session))\n mentor = None\n discussion_date = None\n else:\n if mentor_family == 'Barneche':\n mentor_family = 'Barneche Rosado'\n\n try:\n mentor = Person.objects.get(personal=mentor_personal,\n family=mentor_family)\n except Person.DoesNotExist:\n self.error('Unknown mentor: {} {}'.format(mentor_personal, mentor_family))\n mentor = None\n\n return discussion_date, mentor, withdrawn\n\n def preprocess_training_name(self, session):\n '''Preprocess value from column G.'''\n\n # split \"2015-12-07-na-instructor-training Arlington\"\n slug, city = (session + ' ').split(' ', maxsplit=1)\n\n # find instructor training event\n try:\n training = Event.objects.get(slug=slug)\n except Event.DoesNotExist:\n self.error('No Event slug={}'.format(slug))\n training = None\n\n return slug, training\n\n def preprocess_completion_date(self, raw_completion_date):\n '''Preprocess value from column J.'''\n\n if raw_completion_date == '':\n completion_date = None\n else:\n completion_date = datetime.strptime(raw_completion_date, '%Y-%m-%d')\n return completion_date\n\n def find_trainee(self, email, family, personal):\n name_match = Q(personal__iexact=personal, family__iexact=family)\n email_match = Q(email__iexact=email)\n try:\n trainee = Person.objects.get(email_match)\n except Person.DoesNotExist:\n try:\n trainee = Person.objects.get(name_match)\n except Person.MultipleObjectsReturned:\n candidates = Person.objects.filter(name_match)\n trainee = None\n self.error('More than one matching trainee.')\n except Person.DoesNotExist:\n trainee = None\n self.error('No such trainee in db.')\n\n return trainee\n\n def find_instructor(self, session_name):\n try:\n instructor_name = TRAINING_TO_INSTRUCTOR[session_name]\n except KeyError:\n instructor = None\n self.error('Cannot find instructor -- unknown session slug=\"{}\"'.format(session_name))\n else:\n personal, family = instructor_name.split()\n try:\n instructor = Person.objects.get(personal__iexact=personal,\n family__iexact=family)\n except Person.DoesNotExist:\n instructor = None\n self.error('No instructor {} {} in AMY database.')\n\n return instructor\n\n def check_awards(self, completion_date, dc, dc_exercise,\n dc_instructor_badge, swc, swc_instructor_badge,\n swc_pull_request, trainee):\n\n # Check SWC awards\n try:\n swc_award = Award.objects.get(person=trainee,\n badge=swc_instructor_badge)\n except Award.DoesNotExist:\n swc_awarded = False\n swc_awarded_by = None\n if completion_date and (swc_pull_request or swc):\n self.warning('no swc-instructor awarded')\n else: # swc-instructor badge awarded\n swc_awarded = True\n swc_awarded_by = swc_award.awarded_by\n if not completion_date:\n self.warning('has swc-instructor but should not have')\n if not swc_pull_request and not swc:\n pass\n # self.warning('has swc-instructor but probably should not have')\n\n # Check DC awards\n try:\n dc_award = Award.objects.get(person=trainee,\n badge=dc_instructor_badge)\n except Award.DoesNotExist:\n dc_awarded = False\n dc_awarded_by = None\n if completion_date and (dc_exercise or dc):\n self.warning('no dc-instructor awarded')\n else: # dc-instructor badge awarded\n dc_awarded = True\n dc_awarded_by = dc_award.awarded_by\n if not completion_date:\n self.warning('has dc-instructor but should not 
have')\n if not dc_exercise and not dc:\n pass\n # self.warning('has dc-instructor but probably should not have')\n\n if completion_date and not swc_awarded and not dc_awarded:\n self.warning('no swc- or dc-instructor awarded')\n\n return swc_awarded, dc_awarded, swc_awarded_by, dc_awarded_by\n\n def find_examiner(self, trainee2examiner, trainee_username):\n try:\n examiner_name = trainee2examiner[trainee_username]\n except KeyError:\n examiner = None\n else:\n personal, family = examiner_name.split(' ')\n try:\n examiner = Person.objects.get(personal__iexact=personal,\n family__iexact=family)\n except Person.DoesNotExist:\n self.error('No examiner \"{}\"'.format(examiner_name))\n\n return examiner\n" }, { "alpha_fraction": 0.6481481194496155, "alphanum_fraction": 0.6481481194496155, "avg_line_length": 53, "blob_id": "e34934c1ce6cf83c5339c9e15c8ff74217b1ceaf", "content_id": "388536b10a0c390a7fade609a335e008fb6f0cab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 108, "license_type": "permissive", "max_line_length": 69, "num_lines": 2, "path": "/workshops/templates/base_nav_fixed.html", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "{% extends 'base_nonav_fixed.html' %}\n{% block navbar %}{% include 'navigation_fixed.html' %}{% endblock %}\n" }, { "alpha_fraction": 0.5876106023788452, "alphanum_fraction": 0.5876106023788452, "avg_line_length": 21.600000381469727, "blob_id": "0067ee10ac03c2b7018973ef5c8336d0581975da", "content_id": "ad7d40c99d8718434dc5c15f98165b52b0a9fc6f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1130, "license_type": "permissive", "max_line_length": 93, "num_lines": 50, "path": "/docs/PyData_deploy.md", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "# Deploy AMY for PyData\n\n - Add `pydata` to `INSTALLED_APPS`. 
Ensure that the `pydata` app is listed \n before `workshops` app.\n\n ```py\n INSTALLED_APPS = (\n ...\n 'pydata',\n 'workshops',\n ...\n )\n ```\n\n - Include `pydata.urls` in the `urlpatterns` of `amy.urls` before `workshops.url`.\n\n ```py\n urlpatterns = [\n ...\n url(r'^workshops/', include('pydata.urls')),\n url(r'^workshops/', include('workshops.urls')),\n ...\n ]\n ```\n\n - Add the username and password of a superuser of the conference site to `amy/settings.py`.\n\n ```py\n PYDATA_USERNAME_SECRET = 'username'\n PYDATA_PASSWORD_SECRET = 'password'\n ```\n\n You can also fetch them from the environment variables.\n\n ```py\n PYDATA_USERNAME_SECRET = os.environ.get('PYDATA_USERNAME_SECRET')\n PYDATA_PASSWORD_SECRET = os.environ.get('PYDATA_PASSWORD_SECRET')\n ```\n\n - Install fixtures from the `pydata/fixtures/` directory.\n\n ```sh\n python manage.py loaddata pydata/fixtures/*\n ```\n\n - Ensure that all checks pass.\n\n ```sh\n python manage.py check\n ```\n" }, { "alpha_fraction": 0.6463595628738403, "alphanum_fraction": 0.6493313312530518, "avg_line_length": 29.590909957885742, "blob_id": "6f94fbcd5dd18c1555dd090b37935743e2a84e75", "content_id": "d400d7a8b91984459d4175611573c90644fabea8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 673, "license_type": "permissive", "max_line_length": 99, "num_lines": 22, "path": "/workshops/management/commands/report_invoicing.py", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "import sys\nimport csv\nfrom django.core.management.base import BaseCommand, CommandError\nfrom workshops.models import Event\n\n\nclass Command(BaseCommand):\n args = ''\n help = 'Report all financial activity related to invoicing.'\n\n def handle(self, *args, **options):\n if len(args) != 0:\n raise CommandError('Usage: report_invoicing')\n\n events = Event.objects.filter(admin_fee__gt=0).filter(start__isnull=False).order_by('slug')\n\n records = [['event', 'fee', 'paid']]\n for e in events:\n records.append([e.slug, e.admin_fee, e.invoice_status])\n\n writer = csv.writer(sys.stdout)\n writer.writerows(records)\n" }, { "alpha_fraction": 0.5801869034767151, "alphanum_fraction": 0.5824298858642578, "avg_line_length": 38.92537307739258, "blob_id": "9815c7844af572fc255d20483f7f2323200f0ea7", "content_id": "67a8cd9f80cf7145d756359e9fff20f2261f84a2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2675, "license_type": "permissive", "max_line_length": 109, "num_lines": 67, "path": "/workshops/management/commands/check_certificates.py", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport csv\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.management.base import BaseCommand, CommandError\nfrom workshops.models import Award, Badge, Person\n\n\nclass Command(BaseCommand):\n help = 'Report inconsistencies in PDF certificates.'\n\n def add_arguments(self, parser):\n parser.add_argument(\n 'path', help='Path to root directory of certificates repository',\n )\n\n def handle(self, *args, **options):\n '''Main entry point.'''\n\n path_to_root = options['path']\n\n badges = self.get_badges()\n result = [['which','badge','event','username','person','email','awarded']]\n for (name, badge) in badges:\n db_records = self.get_db_records(badge)\n db_people = db_records.keys()\n cert_path = os.path.join(path_to_root, name)\n if not os.path.isdir(cert_path):\n 
print('No directory {0}'.format(name), file=sys.stderr)\n else:\n file_people = self.get_file_people(cert_path)\n self.missing(result, 'database-disk', name, db_people - file_people, db_records)\n self.missing(result, 'disk-database', name, file_people - db_people, db_records)\n csv.writer(sys.stdout).writerows(result)\n\n def get_badges(self):\n '''Get all available badges as list of lower-case name and badge pairs.'''\n\n return [(b.name.lower(), b) for b in Badge.objects.all()]\n\n def get_db_records(self, badge):\n '''Get set of usernames of all people with the given badge.'''\n\n objects = Award.objects.filter(badge=badge).values_list('person__username', 'awarded', 'event__slug')\n return dict((obj[0], {'awarded': obj[1], 'event': obj[2]}) for obj in objects)\n\n def get_file_people(self, path):\n '''Get names of all people with the given certificate.'''\n\n return set([os.path.splitext(e)[0]\n for e in os.listdir(path)\n if e.endswith('.pdf')])\n\n def missing(self, report, title, kind, usernames, records):\n '''Report missing usernames.'''\n for uid in usernames:\n try:\n p = Person.objects.get(username=uid)\n name = p.get_full_name()\n email = p.email\n event, awarded = '', ''\n if uid in records:\n event = records[uid]['event']\n awarded = records[uid]['awarded']\n report.append([title, kind, event, uid, p.get_full_name(), p.email, awarded])\n except Person.DoesNotExist:\n print('{0}'.format(uid), file=sys.stderr)\n" }, { "alpha_fraction": 0.6739130616188049, "alphanum_fraction": 0.674500584602356, "avg_line_length": 27.366666793823242, "blob_id": "fc5dbd212ac7d63827174ca69daa975e290d9fd4", "content_id": "511dce613a11d02135aace204990db2e15f12544", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1702, "license_type": "permissive", "max_line_length": 74, "num_lines": 60, "path": "/pydata/forms.py", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "from django import forms\n\nfrom workshops.forms import TaskFullForm, SponsorshipForm, BootstrapHelper\nfrom workshops.models import Person, Task, Sponsorship\n\n\nclass PersonMinimalForm(forms.ModelForm):\n\n class Meta:\n model = Person\n fields = ('username', 'personal', 'family', 'email', 'url')\n\n\nclass BaseModelAddFormSet(forms.models.BaseModelFormSet):\n can_delete = True\n can_order = False\n min_num = forms.formsets.DEFAULT_MIN_NUM\n max_num = forms.formsets.DEFAULT_MAX_NUM\n absolute_max = 2 * max_num\n validate_max = False\n validate_min = False\n\n helper = BootstrapHelper(\n form_tag=False, add_submit_button=False, add_reset_button=False)\n\n def __init__(self, *args, **kwargs):\n # Override the default form helper\n super().__init__(*args, **kwargs)\n self.form.helper = self.helper\n\n def add_fields(self, form, index):\n # Change label of DELETE checkbox\n super().add_fields(form, index)\n form[forms.formsets.DELETION_FIELD_NAME].label = 'Do not import'\n\n def get_queryset(self):\n # Do not show any existing model in the formset\n return self.model.objects.none()\n\n def total_form_count(self):\n # Restrict the total number of forms to number of initial forms\n if self.data or self.files:\n return super().total_form_count()\n else:\n return len(self.initial_extra)\n\n\nclass PersonAddFormSet(BaseModelAddFormSet):\n model = Person\n form = PersonMinimalForm\n\n\nclass TaskAddFormSet(BaseModelAddFormSet):\n model = Task\n form = TaskFullForm\n\n\nclass SponsorshipAddFormSet(BaseModelAddFormSet):\n model = Sponsorship\n form = 
SponsorshipForm\n" }, { "alpha_fraction": 0.601686418056488, "alphanum_fraction": 0.6069062352180481, "avg_line_length": 72.25, "blob_id": "29a2eedca290b88c3c25478cbd8ea86d6cb1eecf", "content_id": "8864cf3a5336265a040378675f18aa628a5a39d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 5015, "license_type": "permissive", "max_line_length": 712, "num_lines": 68, "path": "/workshops/templates/workshops/event_details_table.html", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "{% load links %}\n{% load tags %}\n<table class=\"table table-striped\">\n <tr><td>slug:</td><td colspan=\"2\">{{ event.slug|default:\"—\" }}</td></tr>\n <tr><td>completed:</td><td colspan=\"2\">{{ event.completed|yesno }}</td></tr>\n <tr class=\"{% if event.start > event.end %}bg-danger{% endif %}\"><td>start date:</td><td colspan=\"2\">{{ event.start|default:\"—\" }}</td></tr>\n <tr class=\"{% if event.start > event.end %}bg-danger{% endif %}\"><td>end date: </td><td colspan=\"2\">{{ event.end|default:\"—\" }}</td></tr>\n <tr><td>host:</td><td colspan=\"2\"><a href=\"{% url 'organization_details' event.host.domain %}\">{{ event.host }}</a></td></tr>\n <tr><td>administrator:</td><td colspan=\"2\">{% if event.administrator %}<a href=\"{{ event.administrator.get_absolute_url }}\">{{ event.administrator }}</a>{% else %}—{% endif %}</td></tr>\n <tr><td>tags:</td><td colspan=\"2\">{% for tag in event.tags.all %}{% bootstrap_tag tag.name %}{% endfor %}</td></tr>\n <tr class=\"{% if not event.url %}bg-danger{% endif %}\"><td>Website URL:</td><td colspan=\"2\">{{ event.website_url|default:\"—\"|urlize_newtab }} {% if event.url %}<a href=\"{% url 'validate_event' event.slug %}\" class=\"btn btn-primary btn-xs pull-right\" id=\"validate_event\">validate event</a>{% else %}<a class=\"btn btn-danger btn-xs pull-right\" id=\"error_event_url\" href=\"#\" data-toggle=\"popover\" title=\"Validation error\" data-content=\"Cannot validate an event without URL pointing to the GitHub repository, e.g.: <code>https://github.com/swcarpentry/2015-05-24-training</code>\" data-html=\"true\">Error</a>{% endif %}</td></tr>\n <tr><td>Language:</td><td colspan=\"2\">{{ event.language|default:\"—\" }}</td></tr>\n <tr><td>Eventbrite key:</td><td colspan=\"2\">{% if event.reg_key %}<a href=\"https://www.eventbrite.com/myevent?eid={{ event.reg_key }}\" title=\"Go to Eventbrite's page for this event\" target=\"_blank\">{{ event.reg_key }}</a>{% else %}—{% endif %}</td></tr>\n <tr><td>admin fee:</td><td colspan=\"2\">{{ event.admin_fee|default_if_none:\"—\" }}</td></tr>\n <tr>\n <td>invoice:</td>\n <td colspan=\"2\">\n {{ event.get_invoice_status_display }}\n {% if event.uninvoiced %}\n <a href=\"{% url 'event_invoice' event.slug %}\" class=\"btn btn-primary btn-xs pull-right\">Invoice</a>\n {% else %}\n <a href=\"#\" class=\"btn btn-primary btn-xs pull-right disabled\">Invoice</a>\n {% endif %}\n </td>\n </tr>\n <tr>\n <td>invoice requests:</td>\n <td colspan=\"2\">\n {% if event.invoicerequest_set.all %}\n <ul>\n {% for request in event.invoicerequest_set.all %}\n <li><a href=\"{{ request.get_absolute_url }}\" target=\"_blank\">{{ request }}</a> (status: {{ request.long_status }})</li>\n {% endfor %}\n </ul>\n {% else %}\n —\n {% endif %}\n </td>\n </tr>\n <tr class=\"{% if not event.attendance %}bg-danger{% endif %}\">\n <td>attendance:</td>\n <td colspan=\"2\">\n {{ event.attendance|default_if_none:\"—\" }}\n {% if not event.attendance and event.mailto %}\n <a 
href=\"{% include 'workshops/attendance_email_href.html' with event=event %}\" target=\"_blank\" class=\"btn btn-primary btn-xs pull-right\">Ask for attendance</a>\n {% else %}\n <a href=\"#\" class=\"btn btn-primary btn-xs pull-right disabled\">Ask for attendance</a>\n {% endif %}\n </td>\n </tr>\n <tr><td>contact:</td><td colspan=\"2\">{{ event.contact|default_if_none:\"—\"|urlize }}</td></tr>\n <tr>\n <td rowspan=\"4\">location details:</td>\n <td class=\"{% if not event.country %}bg-danger{% endif %}\">Country:</td>\n <td class=\"{% if not event.country %}bg-danger{% endif %}\">\n {% if event.country %}\n {{ event.country.name }} <img src=\"{{ event.country.flag }}\" alt=\"{{ event.country }}\" class=\"country-flag\" />\n {% else %}\n —\n {% endif %}\n </td>\n </tr>\n <tr class=\"{% if not event.venue %}bg-danger{% endif %}\"><td>Venue:</td><td>{{ event.venue|default:\"—\" }}</td></tr>\n <tr class=\"{% if not event.address %}bg-danger{% endif %}\"><td>Address:</td><td>{{ event.address|default:\"—\" }}</td></tr>\n <tr class=\"{% if not event.latitude or not event.longitude %}bg-danger{% endif %}\"><td>Lat/long:</td><td>{{ event.latitude|default:\"—\" }} / {{ event.longitude|default:\"—\" }} {% if event.latitude and event.longitude %}<a href=\"{% url 'workshop_staff' %}?latitude={{ event.latitude }}&amp;longitude={{ event.longitude }}&amp;submit=Submit\" class=\"btn btn-primary btn-xs pull-right\" id=\"find_closest_instructors\">find closest instructors</a>{% else %}<a class=\"btn btn-danger btn-xs pull-right\" id=\"error_closest_instructors\" href=\"#\" data-toggle=\"popover\" title=\"Search error\" data-content=\"Cannot search for closest instructors without latitude and longitude of event's location.\">Error</a>{% endif %}</td></tr>\n <tr><td>Event request</td><td colspan=\"2\">{% if event.request %}<a href=\"{{ event.request.get_absolute_url }}\">{{ event.request }}</a>{% else %}—{% endif %}</td></tr>\n <tr><td>Notes</td><td colspan=\"2\"><pre>{{ event.notes }}</pre></td></tr>\n</table>\n" }, { "alpha_fraction": 0.5895041823387146, "alphanum_fraction": 0.59690922498703, "avg_line_length": 35.11627960205078, "blob_id": "2ce465215ba4770cfcc9d7fa8c5cf173ab1574ff", "content_id": "6e995b3bec3418df6b8922ca07995d5f0b4ed8da", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3106, "license_type": "permissive", "max_line_length": 118, "num_lines": 86, "path": "/pydata/apps.py", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "import re\n\nfrom django import forms\nfrom django.apps import AppConfig\nfrom django.utils.functional import curry\n\n\nclass PyDataConfig(AppConfig):\n name = 'pydata'\n label = 'PyData'\n verbose_name = 'AMY for PyData conferences'\n\n def ready(self):\n from . 
import checks\n\n from workshops.forms import PersonForm, TaskForm, SponsorshipForm\n from workshops.models import Person, Task, Organization, Sponsorship\n from workshops.views import EventCreate, PersonCreate\n\n # Add choices to the `amount` field\n Sponsorship.LEVELS = (\n (0, 'Founding'),\n (15000, 'Diamond'),\n (8000, 'Platinum'),\n (5000, 'Gold'),\n (3000, 'Silver'),\n (1500, 'Supporting'),\n (1, 'Community'),\n )\n\n # Add choices to `amount` field\n # Django migration system complains about missing migrations\n amount_field = Sponsorship._meta.get_field('amount')\n amount_field.choices = Sponsorship.LEVELS\n\n # Add method `get_amount_display` to Sponsorship to return the level\n setattr(\n Sponsorship,\n 'get_amount_display',\n curry(Sponsorship._get_FIELD_display, field=amount_field)\n )\n\n # Override the `__str__` method to display level instead of amount\n def __str__(self):\n return '{}: {}'.format(self.organization, self.get_amount_display())\n Sponsorship.add_to_class('__str__', __str__)\n\n # Add a regex to obtain URL of conference and `pk` of sponsor instance\n Sponsorship.PROFILE_REGEX = re.compile(r'^(?P<url>.+?(?=/sponsors))/sponsors/(?P<id>\\d+)/?') # noqa\n\n # Add \"Import from URL\" button to SponsorshipForm\n class Media:\n js = ('import_sponsor.js', )\n SponsorshipForm.Media = Media\n\n # Add a dropdown to the `amount` field on SponsorshipForm\n SponsorshipForm.base_fields['amount'] = forms.ChoiceField(\n choices=Sponsorship.LEVELS,\n )\n\n # Add a regex to obtain URL of conference and `pk` of presentation\n Task.PRESENTATION_REGEX = re.compile(r'^(?P<url>.+?(?=/schedule))/schedule/presentation/(?P<id>\\d+)/?') # noqa\n\n # Add \"Import from URL\" button to TaskForm\n class Media:\n js = ('import_task.js', )\n TaskForm.Media = Media\n\n # Add a regex to obtain URL of conference and `pk` of speaker\n Person.PROFILE_REGEX = re.compile(r'^(?P<url>.+?(?=/speaker))/speaker/profile/(?P<id>[^/]+)/?') # noqa\n\n # Add \"Import from URL\" button to PersonForm on PersonCreate view\n PersonCreate.template_name = 'pydata/person_create_form.html'\n\n class Media:\n js = ('import_person.js', )\n PersonForm.Media = Media\n\n # Prepopulate fields on EventCreate view\n def get_initial(self):\n numfocus = Organization.objects.get(fullname='NumFOCUS')\n return {\n 'administrator': numfocus,\n 'assigned_to': self.request.user,\n }\n EventCreate.get_initial = get_initial\n" }, { "alpha_fraction": 0.5208970308303833, "alphanum_fraction": 0.5276248455047607, "avg_line_length": 41.65217208862305, "blob_id": "0c3374faefa58d565803cb4d8ac357621dd53c4d", "content_id": "da68c2103cb5595ae3be51fd6577b579f8399dd9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4905, "license_type": "permissive", "max_line_length": 77, "num_lines": 115, "path": "/workshops/management/commands/trainings_completion_rates.py", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "import csv\n\nfrom django.core.management.base import BaseCommand\nfrom django.db.models import Count, Case, When, Value, IntegerField\n\nfrom workshops.models import Badge, Event, Tag, Person, Role\nfrom workshops.util import universal_date_format\n\n\nclass Command(BaseCommand):\n help = 'Report completion rates for new training participants.'\n\n learner = Role.objects.get(name='learner')\n dc_badge = Badge.objects.get(name='dc-instructor')\n swc_badge = Badge.objects.get(name='swc-instructor')\n dc_tag = Tag.objects.get(name='DC')\n swc_tag = 
Tag.objects.get(name='SWC')\n online_tag = Tag.objects.get(name='online')\n\n def trainings(self):\n \"\"\"Create list of trainings.\"\"\"\n return Event.objects.filter(tags=Tag.objects.get(name='TTT')) \\\n .prefetch_related('tags', 'task_set') \\\n .order_by('start')\n\n def badge_type(self, tags):\n \"\"\"Return badge of the same type as event tags.\n\n If no SWC or DC tag is present, SWC badge is assumed.\"\"\"\n if self.swc_tag in tags:\n return self.swc_badge\n elif self.dc_tag in tags:\n return self.dc_badge\n else:\n return self.swc_badge\n\n def learners(self, event):\n \"\"\"Return list of learners at specific training event.\"\"\"\n return Person.objects.filter(task__event=event,\n task__role=self.learner)\n\n def percent(self, numerator, denominator):\n \"\"\"Return percentage if non-zero denominator else 0.0\"\"\"\n if denominator:\n return round((100. * numerator) / denominator, 1)\n return 0.0\n\n def handle(self, *args, **options):\n fields = [\n 'start', 'slug', 'online', 'badge', 'learners',\n 'completed this', 'completed this [%]',\n 'completed other', 'completed other [%]',\n 'no badge', 'no badge [%]',\n 'taught at least once', 'taught at least once [%]',\n ]\n writer = csv.DictWriter(self.stdout, fieldnames=fields)\n writer.writeheader()\n\n for training in self.trainings():\n badge = self.badge_type(training.tags.all())\n learners = self.learners(training)\n learners_len = learners.count()\n completed_len = learners.filter(badges=badge,\n award__event=training).count()\n completed_other_len = learners.filter(badges=badge) \\\n .exclude(award__event=training) \\\n .count()\n no_badge_len = learners.exclude(badges=badge).count()\n\n # Django tries to optimize every query; for example here I had to\n # cast to list explicitly to achieve a query without any\n # WHEREs to task__role__name (which self.learners() unfortunately\n # has to add).\n learners2 = Person.objects.filter(\n pk__in=list(learners.values_list('pk', flat=True)))\n\n # 1. Grab people who received a badge for this training\n # 2. Count how many times each of them taught\n instructors = learners2.filter(award__badge=badge,\n award__event=training)\\\n .annotate(\n num_taught=Count(\n Case(\n When(\n task__role__name='instructor',\n # task__event__start__gte=training.start,\n then=Value(1)\n ),\n output_field=IntegerField()\n )\n )\n )\n # 3. Get only people who taught at least once\n # 4. 
And count them\n instructors_taught_at_least_once = instructors \\\n .filter(num_taught__gt=0) \\\n .aggregate(Count('num_taught'))['num_taught__count'] or 0\n\n record = {\n fields[0]: universal_date_format(training.start),\n fields[1]: training.slug,\n fields[2]: int(self.online_tag in training.tags.all()),\n fields[3]: badge.title,\n fields[4]: learners_len,\n fields[5]: completed_len,\n fields[6]: self.percent(completed_len, learners_len),\n fields[7]: completed_other_len,\n fields[8]: self.percent(completed_other_len, learners_len),\n fields[9]: no_badge_len,\n fields[10]: self.percent(no_badge_len, learners_len),\n fields[11]: instructors_taught_at_least_once,\n fields[12]: self.percent(instructors_taught_at_least_once,\n learners_len),\n }\n writer.writerow(record)\n" }, { "alpha_fraction": 0.5592039823532104, "alphanum_fraction": 0.5592039823532104, "avg_line_length": 32.5, "blob_id": "0165c137e04f322ff7212173b52e700d0e46c73a", "content_id": "409ae0ae7067027dafa77ca1a70b4326e74ae5cd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1005, "license_type": "permissive", "max_line_length": 139, "num_lines": 30, "path": "/workshops/templates/workshops/all_profileupdaterequests.html", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "{% extends \"base_nav_fixed.html\" %}\n\n{% block content %}\n <div class=\"btn-group\" role=\"group\" aria-label=\"Type of requests\">\n <a href=\"{% url 'all_profileupdaterequests' %}\" class=\"btn btn-default{% if active_requests %} active{% endif %}\">New</a>\n <a href=\"{% url 'all_closed_profileupdaterequests' %}\" class=\"btn btn-default{% if not active_requests %} active{% endif %}\">Closed</a>\n </div>\n {% if requests %}\n <table class=\"table table-striped\">\n <thead>\n <tr>\n <th>person</th>\n <th>email</th>\n <th class=\"additional-links\"></th>\n </tr>\n </thead>\n <tbody>\n {% for req in requests %}\n <tr>\n <td>{{ req.personal }} {{ req.family }}</td>\n <td>{{ req.email|urlize }}</td>\n <td><a href=\"{{ req.get_absolute_url }}\"><span class=\"glyphicon glyphicon-info-sign\"></span></a></td>\n </tr>\n {% endfor %}\n </tbody>\n </table>\n {% else %}\n <p>No profile update requests matching the filter.</p>\n {% endif %}\n{% endblock %}\n" }, { "alpha_fraction": 0.5557337403297424, "alphanum_fraction": 0.5721732378005981, "avg_line_length": 34.12676239013672, "blob_id": "0c1a5baf076c56e1a895d69b81328ca4cc9f1072", "content_id": "9b8db34979e3d09ba48fe6e72988599b1807b28e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2494, "license_type": "permissive", "max_line_length": 72, "num_lines": 71, "path": "/workshops/test/test_invoice_requests.py", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "from datetime import date\n\nfrom .base import TestBase\nfrom ..models import InvoiceRequest, Event, Organization\nfrom ..forms import InvoiceRequestForm\n\n\nclass TestInvoiceRequestForm(TestBase):\n def setUp(self):\n self._setUpUsersAndLogin()\n\n def test_adding_minimal(self):\n \"\"\"Test submitting a minimalistic form ends up in\n Event.invoicerequest_set.\"\"\"\n event = Event.objects.create(\n slug='invoiceable-event', host=Organization.objects.first(),\n start='2016-02-09', admin_fee=2500,\n venue='School of Science',\n )\n data = {\n 'organization': event.host.pk,\n 'reason': 'admin-fee',\n 'date': event.start,\n 'event': event.pk,\n 'event_location': event.venue,\n 'contact_name': 
'dr Jane Smith',\n 'contact_email': '[email protected]',\n 'full_address': 'dr Jane Smith, University of Florida',\n 'amount': event.admin_fee,\n 'vendor_form_required': 'no',\n 'receipts_sent': 'not-yet',\n }\n form = InvoiceRequestForm(data)\n self.assertTrue(form.is_valid())\n self.assertEqual(event.invoicerequest_set.count(), 0)\n form.save()\n self.assertEqual(event.invoicerequest_set.count(), 1)\n\n\nclass TestInvoiceRequest(TestBase):\n def setUp(self):\n self._setUpUsersAndLogin()\n\n def test_status_repr(self):\n \"\"\"Test if InvoiceRequest long status representation is fine.\"\"\"\n day = date(2016, 2, 9)\n event = Event.objects.create(\n slug='invoiceable-event', host=Organization.objects.first(),\n start=day, admin_fee=2500,\n venue='School of Science',\n )\n request = InvoiceRequest.objects.create(\n organization=event.host, date=event.start,\n event=event, event_location=event.venue,\n contact_name='dr Jane Smith', contact_email='[email protected]',\n full_address='dr Jane Smith, University of Florida',\n amount=event.admin_fee, form_W9=False,\n sent_date=day, paid_date=day,\n )\n\n tests = [\n ('not-invoiced', 'Not invoiced'),\n ('sent', 'Sent out on 2016-02-09'),\n ('paid', 'Paid on 2016-02-09'),\n ]\n\n for status, long_status in tests:\n with self.subTest(status=status):\n request.status = status\n # request.save()\n self.assertEqual(request.long_status, long_status)\n" }, { "alpha_fraction": 0.5138461589813232, "alphanum_fraction": 0.5211188793182373, "avg_line_length": 27.600000381469727, "blob_id": "5a35245cdfbc4a40638addf339b880779f08c869", "content_id": "c92af750a432ef984420b831b9d1a5bb01be2631", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 3579, "license_type": "permissive", "max_line_length": 159, "num_lines": 125, "path": "/workshops/templates/workshops/admin_dashboard.html", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "{% extends \"base_nav_fluid.html\" %}\n\n{% load compress %}\n{% load static %}\n\n{% block content %}\n\n<div class=\"row\">\n <div class=\"col-xs-12\"><h3>Timeline</h3></div>\n <div class=\"col-xs-12\" id=\"timeline\"></div>\n</div>\n\n{% if updated_metadata %}\n<div class=\"row\">\n <div class=\"col-xs-12\">\n <div class=\"alert alert-warning alert-dismissible\" role=\"alert\">\n <button type=\"button\" class=\"close\" data-dismiss=\"alert\" aria-label=\"Close\"><span aria-hidden=\"true\">&times;</span></button>\n <strong>Attention!</strong> {{ updated_metadata }} of your events had data on website updated. 
<a href=\"{% url 'events_metadata_changed' %}\">See more</a>\n </div>\n </div>\n</div>\n{% endif %}\n\n{% if is_admin or user.is_superuser %}\n<div class=\"row\">\n <div class=\"col-xs-12\">\n <div class=\"btn-group\" role=\"group\" aria-label=\"Events assignment\">\n <a href=\"?assigned_to=me\" class=\"btn btn-default{% if assigned_to == 'me' %} active{% endif %}\">Mine</a>\n <a href=\"?assigned_to=noone\" class=\"btn btn-default{% if assigned_to == 'noone' %} active{% endif %}\">Unassigned</a>\n <a href=\"?assigned_to=all\" class=\"btn btn-default{% if assigned_to == 'all' %} active{% endif %}\">All</a>\n </div>\n </div>\n</div>\n{% endif %}\n\n<div class=\"row\">\n <div class=\"col-xs-3\">\n <h3>Current</h3>\n <table class=\"table table-striped\">\n {% for event in current_events %}\n <tr>\n <td><a href=\"{{ event.get_absolute_url }}\">{{ event.slug }}</a></td>\n </tr>\n {% endfor %}\n </table>\n </div>\n <div class=\"col-xs-3\">\n <h3>Uninvoiced</h3>\n <table class=\"table table-striped\">\n {% for event in uninvoiced_events %}\n <tr>\n <td><a href=\"{{ event.get_absolute_url }}\">{{ event.slug }}</a></td>\n </tr>\n {% endfor %}\n </table>\n </div>\n <div class=\"col-xs-6\">\n <h3>Unpublished</h3>\n <table class=\"table table-striped\">\n <tr>\n <th>#I</th>\n <th>dates</th>\n <th>ID</th>\n <th>domain</th>\n <th>slug</th>\n </tr>\n {% for event in unpublished_events %}\n <tr>\n {% with num_instructors=event.task_set.instructors.count %}\n <td {% if num_instructors == 0 %}class=\"warning\"{% endif %}>\n {{ num_instructors }}\n </td>\n {% endwith %}\n <td>\n {% if event.start %}✓{% endif %}\n </td>\n <td>\n <a href=\"{{ event.get_absolute_url }}\">\n {{ event.id }}\n </a>\n </td>\n <td>\n <a href=\"{% url 'organization_details' event.host.domain %}\">\n {{ event.host }}\n </a>\n </td>\n <td {% if not event.slug %}class=\"warning\"{% endif %}>\n {% if not event.slug %}\n —\n {% else %}\n <a href=\"{% url 'event_details' event.slug %}\">\n {{ event.slug }}\n </a>\n {% endif %}\n </td>\n </tr>\n {% endfor %}\n </table>\n </div>\n</div>\n{% endblock %}\n\n{% block extrajs %}\n{% compress js %}\n<script src=\"{% static 'vis/dist/vis.js' %}\"></script>\n{% endcompress %}\n<script type=\"text/javascript\">\n $.getJSON(\"{% url 'api:user-todos' %}\", {}, function(data) {\n var container = document.getElementById('timeline');\n var items = new vis.DataSet(data);\n var options = {\n width: '100%',\n // height: '30px',\n clickToUse: true,\n timeAxis: {\n scale: 'day',\n step: 1\n },\n start: '{{ todos_start_date|date:'c' }}',\n end: '{{ todos_end_date|date:'c' }}'\n };\n var timeline = new vis.Timeline(container, items, options);\n });\n</script>\n{% endblock %}\n" }, { "alpha_fraction": 0.639244019985199, "alphanum_fraction": 0.6453585028648376, "avg_line_length": 35.71428680419922, "blob_id": "d7562aeb57731bb171eb1129096a3141b336cce1", "content_id": "592b92768a2fea021fd6a94cf47c2982a7dbfa05", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1799, "license_type": "permissive", "max_line_length": 336, "num_lines": 49, "path": "/workshops/migrations/0054_self_organized_host.py", "repo_name": "narayanaditya95/amy", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport re\n\nimport django\nfrom django.db import models, migrations\nfrom django.db.models import Q\n\n\ndef add_self_organized_host(apps, schema_editor):\n \"\"\"Make new host: self-organized.\"\"\"\n Host = 
apps.get_model('workshops', 'Host')\n Host.objects.create(domain='self-organized', fullname='self-organized',\n country='W3')\n\n\ndef update_administrator_to_self_organized(apps, schema_editor):\n \"\"\"Find all events that were self-organized and set administrator for them\n to be \"self-organized\".\"\"\"\n Host = apps.get_model('workshops', 'Host')\n self_org = Host.objects.get(fullname='self-organized')\n\n Event = apps.get_model('workshops', 'Event')\n Event.objects.filter(administrator__isnull=True) \\\n .filter(\n Q(invoice_status='na-self-org') |\n Q(notes__contains='self-organized') |\n Q(notes__contains='self organized')\n ) \\\n .update(administrator=self_org)\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('workshops', '0053_merge'),\n ]\n\n operations = [\n # some missing migration, totally healthy (changes only validators for the field)\n migrations.AlterField(\n model_name='event',\n name='url',\n field=models.CharField(validators=[django.core.validators.RegexValidator(re.compile('https?://github\\\\.com/(?P<name>[^/]+)/(?P<repo>[^/]+)/?', 32), inverse_match=True)], unique=True, max_length=100, help_text='Setting this and startdate \"publishes\" the event.<br />Use link to the event\\'s website.', blank=True, null=True),\n ),\n\n migrations.RunPython(add_self_organized_host),\n migrations.RunPython(update_administrator_to_self_organized),\n ]\n" } ]
18
DasGuna/armer_ur
https://github.com/DasGuna/armer_ur
cf2f490f4398ce8051fea49a50da7a24dd771ab5
2f67b9d8a60e08ea3e77f7b625546341cc6dbf19
e38969ff0882d7c502af03b496ce65af9e64c5ea
refs/heads/main
2023-07-04T19:28:39.203019
2021-08-19T23:22:31
2021-08-19T23:22:31
394,886,318
0
0
null
2021-08-11T06:29:48
2021-07-28T22:57:18
2021-07-28T22:57:16
null
[ { "alpha_fraction": 0.6461538672447205, "alphanum_fraction": 0.6461538672447205, "avg_line_length": 13.44444465637207, "blob_id": "a875e3bc976e3a5fd616d9116ec64b2097ad7cbe", "content_id": "9c8146bfdba730150b956e13c249e351bd638897", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "permissive", "max_line_length": 49, "num_lines": 9, "path": "/armer_ur/robots/__init__.py", "repo_name": "DasGuna/armer_ur", "src_encoding": "UTF-8", "text": "\n\"\"\"\nRobots for Armer\n.. codeauthor:: Gavin Suddreys\n\"\"\"\nfrom armer_ur.robots.URROSRobot import URROSRobot\n\n__all__ = [\n 'URROSRobot'\n]" }, { "alpha_fraction": 0.7580775618553162, "alphanum_fraction": 0.7613085508346558, "avg_line_length": 42.438594818115234, "blob_id": "a7f36ac6b51e5cf725d7a90e36b1b2a34a49aa86", "content_id": "28b99823f1bcccbdf352109de3c7eb010c881e5f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2476, "license_type": "permissive", "max_line_length": 239, "num_lines": 57, "path": "/README.md", "repo_name": "DasGuna/armer_ur", "src_encoding": "UTF-8", "text": "# Armer UR\n[![QUT Centre for Robotics Open Source](https://github.com/qcr/qcr.github.io/raw/master/misc/badge.svg)](https://qcr.github.io)\n[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)\n[![Build Status](https://github.com/suddrey-qut/armer/workflows/Build/badge.svg?branch=master)](https://github.com/suddrey-qut/armer/actions?query=workflow%3ABuild)\n[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/suddrey-qut/armer.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/suddrey-qut/armer/context:python)\n[![Coverage](https://codecov.io/gh/suddrey-qut/armer/branch/master/graph/badge.svg)](https://codecov.io/gh/suddrey-qut/armer)\n\n*To be used with the [Armer Driver](https://github.com/qcr/armer)*\n\nThis package launches the UR drivers for use with the [Armer Driver](https://github.com/qcr/armer).\n\nIt interfaces with the [UR drivers](https://github.com/UniversalRobots/Universal_Robots_ROS_Driver) so they must be installed and built as well.\n\n## Installation\n\n### Preinstallation step: Install UR drivers\n1. Clone the driver and description to the Armer workspace\n\n```\ncd ~/armer_ws \ngit clone https://github.com/UniversalRobots/Universal_Robots_ROS_Driver.git src/Universal_Robots_ROS_Driver \ngit clone -b calibration_devel https://github.com/fmauch/universal_robot.git src/fmauch_universal_robot \ncd fmauch_universal_robot \nrm -r *moveit*\nrm -r *gazebo*\necho \"Completed download and removing Moveit and Gazebo files\"\n```\n2. Install dependencies and build workspace\n```\nsudo apt update \nrosdep update \nrosdep install --from-paths src --ignore-src -y\ncatkin_make\necho \"Completed dependency install\"\n```\n\nThe URCap helper program is also required for running on a physical robot.\n\n### Armer UR installation\nThe following code snippet will download the Armer UR hardware package to workspace ~/armer_ws. It will then install dependencies and rebuild the workspace.\n\n```\ncd ~/armer_ws\ngit clone https://github.com/qcr/armer_ur.git src/armer_ur\nrosdep install --from-paths src --ignore-src -r -y \ncatkin_make \n```\n\n## Usage\n```sh\nroslaunch armer_ur robot_bringup.launch \n```\n By default this will launch to control a physical UR5. 
To run a Swift simulation or specify a different UR model, set the sim parameter to true and the ur_model parameter to the desired model, such as \"ur3\". For example:\n\n```sh\nroslaunch armer_ur robot_bringup.launch sim:=true ur_model:=ur3\n```\n" }, { "alpha_fraction": 0.6422272324562073, "alphanum_fraction": 0.6449987292289734, "avg_line_length": 35.09090805053711, "blob_id": "9545220aa800514200c2e637c5e5e2746d8507bd", "content_id": "fb9dafbae7323001d0d54bd2568ba90e3dd9da33", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3969, "license_type": "permissive", "max_line_length": 210, "num_lines": 110, "path": "/armer_ur/robots/URROSRobot.py", "repo_name": "DasGuna/armer_ur", "src_encoding": "UTF-8", "text": "\"\"\"\nURROSRobot module defines the URROSRobot type\nURROSRobot provides robot-specific callbacks for recovery and setting impedance.\n.. codeauthor:: Gavin Suddreys\n\"\"\"\nimport rospy\nimport actionlib\nimport roboticstoolbox as rtb\n\nfrom armer.robots import ROSRobot\n\nfrom std_srvs.srv import EmptyRequest, EmptyResponse\nfrom std_srvs.srv import Trigger, TriggerRequest\n\nfrom armer_msgs.msg import ManipulatorState\n\nfrom armer_msgs.srv import \\\n    SetCartesianImpedanceRequest, \\\n    SetCartesianImpedanceResponse\n\nfrom ur_dashboard_msgs.msg import RobotMode, SafetyMode\n\n\n\nclass URROSRobot(ROSRobot):\n    def __init__(self,\n                 robot: rtb.robot.Robot,\n                 controller_name: str = None,\n                 recover_on_estop: bool = True,\n                 *args,\n                 **kwargs):\n\n        super().__init__(robot, *args, **kwargs)\n        self.controller_name = controller_name \\\n            if controller_name else self.joint_velocity_topic.split('/')[1]\n\n        self.recover_on_estop = recover_on_estop\n        self.last_estop_state = 0\n\n        # UR state subscribers\n        self.robot_state_subscriber = rospy.Subscriber(\n            '/ur_hardware_interface/robot_mode',\n            RobotMode,\n            self.ur_robot_cb\n        )\n        self.safety_state_subscriber = rospy.Subscriber(\n            '/ur_hardware_interface/safety_mode',\n            SafetyMode,\n            self.ur_safety_cb\n        )\n        self.last_estop_state = 0\n\n        self.robot_state = None\n        self.safety_state = None\n\n        # Error recovery services\n        self.unlock_proxy = rospy.ServiceProxy('/ur_hardware_interface/dashboard/brake_release', Trigger)\n        self.unlock_proxy.wait_for_service()\n\n        self.reset_proxy = rospy.ServiceProxy('/ur_hardware_interface/dashboard/play', Trigger)\n        self.reset_proxy.wait_for_service()\n\n        self.recover_cb(EmptyRequest())\n    \n\n    def recover_cb(self, req: EmptyRequest) -> EmptyResponse: # pylint: disable=no-self-use\n        \"\"\"[summary]\n        ROS Service callback:\n        Invoke any available error recovery functions on the robot when an error occurs\n        :param req: an empty request\n        :type req: EmptyRequest\n        :return: an empty response\n        :rtype: EmptyResponse\n        \"\"\"\n        print('Recover')\n        self.unlock_proxy(TriggerRequest())\n        while not self.robot_state or self.robot_state.mode != RobotMode.RUNNING:\n            rospy.sleep(1)\n        print('Reset')\n        self.reset_proxy(TriggerRequest())\n        return EmptyResponse()\n\n    def get_state(self):\n        state = super().get_state()\n\n        if self.robot_state:\n            state.errors |= ManipulatorState.LOCKED if self.robot_state.mode == RobotMode.IDLE or self.robot_state.mode == RobotMode.POWER_OFF else 0\n        \n        if self.safety_state:\n            state.errors |= ManipulatorState.ESTOP if self.safety_state.mode == SafetyMode.ROBOT_EMERGENCY_STOP else 0\n            state.errors |= ManipulatorState.JOINT_LIMIT_VIOLATION | ManipulatorState.CARTESIAN_LIMIT_VIOLATION | ManipulatorState.TORQUE_LIMIT_VIOLATION 
if self.safety_state.mode == SafetyMode.VIOLATION else 0\n state.errors |= ManipulatorState.OTHER if self.safety_state.mode != SafetyMode.NORMAL and self.safety_state.mode != SafetyMode.ROBOT_EMERGENCY_STOP else 0 \n\n if self.safety_state and self.safety_state.mode == SafetyMode.NORMAL:\n if self.recover_on_estop and self.last_estop_state == 1:\n self.recover_cb(EmptyRequest())\n else:\n if state.errors & ManipulatorState.OTHER == ManipulatorState.OTHER:\n self.recover_cb(EmptyRequest())\n\n self.last_estop_state = 1 if self.safety_state and \\\n self.safety_state.mode == SafetyMode.ROBOT_EMERGENCY_STOP else 0\n\n return state\n\n def ur_robot_cb(self, msg):\n self.robot_state = msg\n\n def ur_safety_cb(self, msg):\n self.safety_state = msg" } ]
3
arrheni/bat_to_bash
https://github.com/arrheni/bat_to_bash
ea9d6f79334ab42020f8ff4299fcf936139a3f06
d7c313060130d207d19d90bdf72252f8d1634b81
768c58d50af525b2e6e7be5895bae645df47497c
refs/heads/master
2021-08-04T18:59:21.892242
2020-07-27T09:38:45
2020-07-27T09:38:45
203,123,873
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6694560647010803, "alphanum_fraction": 0.7196652889251709, "avg_line_length": 25.5, "blob_id": "32ef56bcf658af0fbe82f7742c93ad7ce8e7face", "content_id": "5020b43322923d75936007517e1567346a1c54a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 478, "license_type": "no_license", "max_line_length": 60, "num_lines": 18, "path": "/restart_was_dmg.sh", "repo_name": "arrheni/bat_to_bash", "src_encoding": "UTF-8", "text": "\n#!/bin/sh\ndmgrDir=/home/ap/was/AppServer/profiles/Dmgr01/bin\nprofileDir=/home/ap/was/AppServer/profiles/AppSrv01/bin\n\n\nkill -9 `ps -ef|grep dmgr|grep Dmgr01|awk '{print $2}'`\nkill -9 `ps -ef|grep node|grep AppSrv01|awk '{print $2}'`\nkill -9 `ps -ef|grep server1|grep AppSrv01|awk '{print $2}'`\nkill -9 `ps -ef|grep server2|grep AppSrv01|awk '{print $2}'`\n\ncd ${dmgrDir}\nsh startManager.sh \n\n\ncd ${profileDir}\nsh startNode.sh\nsh startServer.sh server1\nsh startServer.sh server2\n" }, { "alpha_fraction": 0.6115843057632446, "alphanum_fraction": 0.632027268409729, "avg_line_length": 18.566667556762695, "blob_id": "ebba5250df1d1bf0c6c5787107b8a644e18e8225", "content_id": "821192298464226cfaa0a271ab4147ae1a558730", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 711, "license_type": "no_license", "max_line_length": 58, "num_lines": 30, "path": "/sh_java.sh", "repo_name": "arrheni/bat_to_bash", "src_encoding": "UTF-8", "text": "#!/bin/sh\nJAR_PATH=removeEhcache.jar\necho '输入参数,j为精确查询, a是全部清除,q退出 ' \nread -p '输入参数: ' input_var_1\n\nif [ $input_var_1 = \"j\" ] ; then \n\techo \" 需输入参数,如(T_UDMP_SUB_SYSTEM_INFO,null) \"\n\tread -p \"输入:\" input_var_2\n\tPARAM=\"$input_var_2 udmp-InmutableCache 1\"\nelif [ $input_var_1 = \"a\" ] ; then\n\tPARAM=\"udmp-InmutableCache 2\"\nelse\n\techo \"退出。。。\"\n\texit 0\nfi\n#PARAM=\"T_UDMP_SUB_SYSTEM_INFO,null udmp-InmutableCache 1\"\n \necho \"执行命令:java -jar $JAR_PATH $PARAM\"\nVAR=$(java -jar $JAR_PATH $PARAM)\n \n# 判断调用是否成功\nif [ $? 
-ne 0 ]; then\n\t\techo \"=====调用失败 =====\" \n\t\texit 1\nfi\n \n# 成功获得返回值\necho $VAR\n \nexit 0\n" }, { "alpha_fraction": 0.634406566619873, "alphanum_fraction": 0.6506926417350769, "avg_line_length": 48.00917434692383, "blob_id": "00959cfd84a98cde693417e8ae2c10f45761ae65", "content_id": "6d18c24ecf62f2404b47140349c36da2eca8467d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 5430, "license_type": "no_license", "max_line_length": 628, "num_lines": 109, "path": "/deploy_war_to_websphere.sh", "repo_name": "arrheni/bat_to_bash", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nd=$(date +%F)\n#d=\"2019-08-14\"\n\nbase_path=\"${WORKSPACE}\"\nmother_path=\"$base_path/local-webapp/target/local-webapp-0.5.4/\"\nwar_file=\"local-webapp-0.5.4.war\"\njar_list=\"nb-interface nb-impl nb-web uw-interface uw-impl uw-web pa-interface clm-interface cap-interface css-interface prd-interface prd-impl commonbiz-interface commonbiz-impl commonbiz-web fms-biz\"\n\nMETA_resources=\"nb uw common\"\n\n#########################################################################################################\nbackup_path=\"${base_path}/Backup/$d/\"\nfrom_path=\"WEB-INF/lib/\"\nto_class_path=\"WEB-INF/classes/\"\nMETA_resources_path=\"WEB-INF/classes/META-INF/resources/\"\nwebapp=\"src/main/webapp/\"\nresources=\"src/main/resources/\"\njava=\"src/main/java/\"\n\n#########################################################################################################\nfunction clean_path()\n{\n if [ -d $backup_path ] \n\t then\n\t\t rm -rf ${backup_path}*\n\t else\n\t\t mkdir -p $backup_path\n\t fi\n}\n\n############################## unzip war or jar #########################################################\nfunction unzip_jar()\n{\n\tfor jar in ${jar_list}\n\tdo\n\t\tunzip_jar=`ls ${mother_path}${from_path}${jar}*`\n\t\tunzip -qo ${unzip_jar} -d ${mother_path}${to_class_path}\n\t\tmv ${unzip_jar} ${backup_path}\n\tdone\n\t\n\tfor mr in $META_resources\n\tdo\n\t cp -r ${mother_path}${META_resources_path}${mr} ${mother_path}\n\t rm -r ${mother_path}${META_resources_path}${mr}\n\tdone\n}\n\n\n############################## zip jar .war ############################################################\nfunction zip_war_file()\n{\n\tcd $mother_path\n\tjar -cfM0 $war_file ./\n\tmv $war_file $backup_path\n\tcd $base_path\n\tfind Backup/ -maxdepth 1 -mindepth 1 -type d -mtime 7 -exec rm -rfv \"{}\" \\;\n}\n\nclean_path\nunzip_jar\nzip_war_file\n\n##########################################################################################################\n\n\n\nearfile=`ls ${WORKSPACE}/local-webapp/target/*.war|xargs basename`\nnode=`echo ${NODE_LABELS}`\nif [ $node == \"master\" ];then\n/home/ap/was/AppServer/profiles/AppSrv01/bin/wsadmin.sh -host $host -port 8882 -user $username -password $password -c '$AdminApp update '$appname' app {-operation update -contents '${WORKSPACE}'/local-webapp/target/'$earfile' -contextroot '$contextroot' -usedefaultbindings -MapResRefToEJB{{'$earfile' .* '$earfile',WEB-INF/web.xml jdbc/UdmpJndiDataSource javax.sql.DataSource jdbc/UdmpJndiDataSource}{'$earfile' .* '$earfile',WEB-INF/web.xml jdbc/UdmpDataSource javax.sql.DataSource jdbc/UdmpDataSource}{'$earfile' .* '$earfile',WEB-INF/web.xml jdbc/UdmpCommonDataSource javax.sql.DataSource jdbc/UdmpCommonDataSource}}}'\nelif [ $node == \"98.9\" ];then\n#time /home/fb/AppServer/profiles/AppSrv01/bin/wsadmin.sh -host $host -port 8882 -user $username -password $password -c '$AdminApp update 
'$appname' app {-operation update -contents '${backup_path}${war_file}' -contextroot '$contextroot' -usedefaultbindings -MapResRefToEJB{{'$earfile' .* '$earfile',WEB-INF/web.xml jdbc/UdmpJndiDataSource javax.sql.DataSource jdbc/UdmpJndiDataSource}{'$earfile' .* '$earfile',WEB-INF/web.xml jdbc/UdmpDataSource javax.sql.DataSource jdbc/UdmpDataSource}{'$earfile' .* '$earfile',WEB-INF/web.xml jdbc/UdmpCommonDataSource javax.sql.DataSource jdbc/UdmpCommonDataSource}}}'\nwhoami\n#time /home/fb/AppServer/profiles/AppSrv01/bin/wsadmin.sh -host $host -port 8882 -user $username -password $password -c '$AdminApp update '$appname' app {-operation update -contents '${WORKSPACE}'/local-webapp/target/'$earfile' -contextroot '$contextroot' -usedefaultbindings -MapResRefToEJB{{'$earfile' .* '$earfile',WEB-INF/web.xml jdbc/UdmpJndiDataSource javax.sql.DataSource jdbc/UdmpJndiDataSource}{'$earfile' .* '$earfile',WEB-INF/web.xml jdbc/UdmpDataSource javax.sql.DataSource jdbc/UdmpDataSource}{'$earfile' .* '$earfile',WEB-INF/web.xml jdbc/UdmpCommonDataSource javax.sql.DataSource jdbc/UdmpCommonDataSource}}}'\nfi\n\n\n\n\n\n\n\n\nsleep 120\ndir=\"/home/ap/was/AppServer/profiles/AppSrv02/config/cells/COREPTap0Node01Cell/applications/local-webapp1657a8a2050.ear/deployments/local-webapp1657a8a2050\"\n\nloaderStatus=$(grep 'classloaderMode=\"PARENT_FIRST\"' $dir/deployment.xml)\necho \"类加载顺序为PARENT_FIRST?====$loaderStatus\"\nloaderStatus1=$(grep 'classloaderMode=\"PARENT_LAST\"' $dir/deployment.xml)\necho \"类加载顺序为PARENT_LAST?=====$loaderStatus1\"\n\n#类加载顺序不为PARENT_LAST和PARENT_FIRST时\nif [ -z \"$loaderStatus\" ] && [ -z \"$loaderStatus1\" ];then\necho \"===========类加载顺序不为PARENT_LAST和PARENT_FIRST时=========\"\nrow=$(grep -n \"appdeployment:WebModuleDeployment\" $dir/deployment.xml|awk -F ':' '{print $1}')\nsed -i $row's/containsEJBContent=\"0\"/classloaderMode=\"PARENT_LAST\" containsEJBContent=\"0\"/' $dir/deployment.xml \npid=`ps -ef|grep server1|grep AppSrv02|grep -v grep|awk '{print $2}' ` && kill -9 $pid \nsh /home/ap/was/AppServer/profiles/AppSrv02/bin/startServer.sh server1 \n\n#类加载顺序为PARENT_FIRST 时\nelif [ ! -z \"$loaderStatus\" ] ;then\necho \"===========类加载顺序为PARENT_FIRST 时==========\"\nsed -i 's/classloaderMode=\"PARENT_FIRST\"/classloaderMode=\"PARENT_LAST\"/' `grep \"classloaderMode=\"PARENT_FIRST\"\" -rl $dir/deployment.xml` $dir/deployment.xml \npid=`ps -ef|grep server1|grep AppSrv02|grep -v grep|awk '{print $2}' ` && kill -9 $pid \nsh /home/ap/was/AppServer/profiles/AppSrv02/bin/startServer.sh server1 \n\nfi\n" }, { "alpha_fraction": 0.7546173930168152, "alphanum_fraction": 0.7704485654830933, "avg_line_length": 28.153846740722656, "blob_id": "4de5ce0cb17374b5c92b2460c834a5aa8117903d", "content_id": "82f10b0ee609f6aefc2b0dafd0bc7cf315083ef2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 379, "license_type": "no_license", "max_line_length": 122, "num_lines": 13, "path": "/README.md", "repo_name": "arrheni/bat_to_bash", "src_encoding": "UTF-8", "text": "# bat_to_bash\n\nbackup bat or bash which i used\n\n\nupload file form windows to linux or run a linux bash shell from windows bat \n\nbat upload_restart_websphere.bat need download suit https://www.putty.org/ and add scp.exe and putty.exe etc. 
to PATH\n\ndeploy_war_to_websphere.sh need websphere envirment\n\n\nhttps://unix.stackexchange.com/questions/127712/merging-folders-with-mv\n" }, { "alpha_fraction": 0.5817689895629883, "alphanum_fraction": 0.6140748858451843, "avg_line_length": 27.701753616333008, "blob_id": "1130e06f2a68b574aacbe73c11ca792772760d65", "content_id": "77a0e974cf0ccb696c987be02c5a735a6b141481", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 11641, "license_type": "no_license", "max_line_length": 593, "num_lines": 399, "path": "/deploy_increment_zip_or_unzip_war_or_rsync.sh", "repo_name": "arrheni/bat_to_bash", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nblue=\"\\033[0;36m\"\nred=\"\\033[0;31m\"\ngreen=\"\\033[0;32m\"\nclose=\"\\033[m\"\n\nd=$(date +%F)\n\necho -e \"--------------- must para 1 system in ( $green qry nb pa css jrqd $close)--------------------------\"\necho -e \"----------------must para 2 deploy in ( $green ql cbql cbzl $close)--------------------------\"\necho -e \"-------------------------- may para 3 the day before today to update -------------------------------\"\n\nif [ -z \"$1\" -a -z \"$2\" ] ;then \n\n\techo -e \"use the paragram need para $red at least 2 $close in qry nb pa css \"\n\t\n\texit 1\n\t\nfi\n\nsystem=$1\ndeploy=$2\nday=$3\n\n# svn log start_date <= svn log file < end_date st='2019-9-6T11:00'\nst=\"$(date +%F -d \"$day days ago\")T11:00\"\n\n# end_time et=\"$(date +%F)T12:00\"\net=\"$(date +%FT%R)\"\n\n#base_path=`pwd`\nbase_path=\"${WORKSPACE}\"\n\ncase $system in\n\n qry)\n\t\t\n\t\tmother_path=\"${base_path}/qry-web/target/qry-web/\"\n\t\t\n\t\twar_file=\"qry-web.war\"\n\t\t\n\t\tjar_list=\"nb-interface uw-interface pa-interface clm-interface cap-interface css-interface qry-interface qry-impl prd-interface prd-impl commonbiz-interface commonbiz-impl commonbiz-web fms-biz\"\n\t\t\n\t\tMETA_resources=\"common\"\n\t\t\n\t\tcat >svn_url <<EOF\nhttps://10.1.40.2/svn/P102_CBO/02Test/tag/NB-UW/ || nb-interface | uw-interface\nhttps://10.1.40.2/svn/P108_CAP/02Test/tag/CLM_CAP/ || pa-interface | clm-interface | cap-interface \nhttps://10.1.40.2/svn/P105_CSS/02Test/trunk/code/css/ || css-interface \nhttps://10.1.40.2/svn/P111_COB/01Dev/trunk/code/commonbiz/ || commonbiz-interface | commonbiz-impl | commonbiz-web\nhttps://10.1.40.2/svn/P105_CSS/02Test/trunk/code/qry/ || qry-interface | qry-impl | qry-web\nhttps://10.1.40.2/svn/P102_CBS/02Test/trunk/code/prd/ || prd-interface | prd-impl\nhttps://10.1.40.2/svn/P102_CBS/01Dev/trunk/code/fms/ || fms-biz\nEOF\n\n ;;\n\t\n nb)\n\t\n\t\tmother_path=\"$base_path/local-webapp/target/local-webapp-0.5.4/\"\n\t\t\n\t\twar_file=\"local-webapp-0.5.4.war\"\n\t\t\n\t\tjar_list=\"nb-interface nb-impl nb-web uw-interface uw-impl uw-web pa-interface clm-interface cap-interface css-interface prd-interface prd-impl commonbiz-interface commonbiz-impl commonbiz-web fms-biz\"\n\t\t\n\t\tMETA_resources=\"nb uw common\"\n\t\t\n\t\tcat >svn_url <<EOF\nhttps://10.1.40.2/svn/P102_CBO/02Test/tag/NB-UW/ || nb-interface | nb-impl |nb-web | uw-interface |uw-impl | uw-web |local-webapp \nhttps://10.1.40.2/svn/P108_CAP/02Test/tag/CLM_CAP/ || pa-interface | clm-interface | cap-interface \nhttps://10.1.40.2/svn/P105_CSS/02Test/trunk/code/css/ || css-interface \nhttps://10.1.40.2/svn/P111_COB/01Dev/trunk/code/commonbiz/ || commonbiz-interface | commonbiz-impl | commonbiz-web\nhttps://10.1.40.2/svn/P102_CBS/02Test/trunk/code/prd/ || prd-interface | 
prd-impl\nhttps://10.1.40.2/svn/P102_CBS/01Dev/trunk/code/fms/ || fms-biz\nEOF\n\n ;;\n\t\n pa)\n\t\t#编译完成的应用目录\n\t\tmother_path=\"$base_path/local-webapp/target/local-webapp-0.5.5.3/\"\n\t\t\n\t\t#编译生成的war包名称\n\t\twar_file=\"local-webapp-0.5.5.3.war\"\n\t\t\n\t\t#需要解压缩的jar包前缀\n\t\tjar_list=\"nb-interface uw-interface pa-interface pa-impl pa-web clm-interface clm-impl clm-web cap-interface cap-impl cap-web css-interface prd-interface prd-impl commonbiz-interface commonbiz-impl commonbiz-web fms-biz\"\n\t\t\n\t\t#需要从META resources移除的目录\n\t\tMETA_resources=\"pa cs clm cap common mobcss images\"\n\t\t\n\t\tcat >svn_url <<EOF\nhttps://10.1.40.2/svn/P102_CBO/02Test/tag/NB-UW/ || nb-interface | uw-interface\nhttps://10.1.40.2/svn/P108_CAP/02Test/tag/CLM_CAP/ || pa-interface |pa-impl |pa-web | clm-interface| clm-impl |clm-web | cap-interface |cap-impl |cap-web | local-webapp\nhttps://10.1.40.2/svn/P105_CSS/02Test/trunk/code/css/ || css-interface \nhttps://10.1.40.2/svn/P111_COB/01Dev/trunk/code/commonbiz/ || commonbiz-interface | commonbiz-impl | commonbiz-web\nhttps://10.1.40.2/svn/P102_CBS/02Test/trunk/code/prd/ || prd-interface | prd-impl\nhttps://10.1.40.2/svn/P102_CBS/01Dev/trunk/code/fms/ || fms-biz\nEOF\n\n\n#svn_url=\"https://10.1.40.2/svn/P102_CBO/02Test/tag/NB-UW/ || nb-interface | uw-interface\n#https://10.1.40.2/svn/P108_CAP/02Test/tag/CLM_CAP/ || pa-interface |pa-impl |pa-web | clm-interface| clm-impl |clm-web | cap-interface |cap-impl |cap-web\n#https://10.1.40.2/svn/P105_CSS/02Test/trunk/code/css/ || css-interface \n#https://10.1.40.2/svn/P111_COB/01Dev/trunk/code/commonbiz/ || commonbiz-interface | commonbiz-impl | commonbiz-web\n#https://10.1.40.2/svn/P102_CBS/02Test/trunk/code/prd/ || prd-interface | prd-impl\n#https://10.1.40.2/svn/P102_CBS/01Dev/trunk/code/fms/ || fms-biz\"\n\n ;;\n\t\n\tcss)\n\t\n\t;;\n\t*)\n echo -e \" $red Input error!! 
$close only support ( qry nb pa css ) system to update $red----EXIT----$close \"\n\t\t\n\t\texit 1\n ;;\nesac\n\n\n\n\n#########################################################################################################\n#增量文件列表,svn 生成\nsvn_update_list=\"svn_update_list_$system_$d.txt\"\n\t\t\n#生成的增量zip包\nincrement_zip=\"increment_$system_$d.zip\"\n\nbackup_path=\"${base_path}/Backup/$d/\"\nrsync_path=\"${backup_path}Rsync/\"\n\nupdate_file_list=\"update_file_list_$d.txt\"\njava_list=\"java_list_$d.txt\"\n\nfrom_path=\"WEB-INF/lib/\"\nto_class_path=\"WEB-INF/classes/\"\nMETA_resources_path=\"WEB-INF/classes/META-INF/resources/\"\n\nwebapp=\"src/main/webapp/\"\nresources=\"src/main/resources/\"\njava=\"src/main/java/\"\n\n\n\n\nfunction clean_backup_path(){\n\n\tfind Backup/ -maxdepth 1 -mindepth 1 -type d -mtime +7 -exec rm -rf \"{}\" \\;\n\t\n\tif [ -d ${backup_path} ]; then\n\t\n\t\trm -rf ${backup_path}*\n\n\telse\n\t\n\t\tmkdir -p ${backup_path}\n\t\n\tfi\n\t\n}\n\nfunction unzip_jar() {\n\n\tfor jar in ${jar_list}; do\n\t\n\t\tunzip_jar=$(ls ${mother_path}${from_path}${jar}*)\n\t\t\n\t\tunzip -qo ${unzip_jar} -d ${mother_path}${to_class_path}\n\t\t\n\t\tmv ${unzip_jar} ${backup_path}\n\t\t\n\tdone\n\n\tfor mr in ${META_resources}; do\n\t\n\t\tcp -r ${mother_path}${META_resources_path}${mr} ${mother_path}\n\t\t\n\t\trm -r ${mother_path}${META_resources_path}${mr}\n\t\t\n\tdone\n\n}\n\n\nfunction get_unjar_full_war_file() {\n\n\tcd ${mother_path}\n\t\n\tjar -cfM0 ${war_file} ./\n\t\n\tmv ${war_file} ${backup_path}\n\t\n\tcd ${base_path}\n\t\n}\n\nfunction get_svn_update_list() {\n\n\techo -e \"-----get svn update file list from $red $st $close to $green $et $close svn change file is $blue ${svn_update_list} $close -----\"\n\n\tcat /dev/null >${svn_update_list}\n\t\n\t#echo \"$svn_url\"| sed 's/[[:space:]]//g' | while read line;do\n\tcat svn_url | sed 's/[[:space:]]//g' | while read line; do\n\t\n\t\turl=${line%||*}\n\t\t\n\t\tkey_words=${line#*||}\n\n\t\tsvn diff -r {$st}:{$et} $url --summarize | awk -F $url '/.+[.][^./]+$/ {if($1==\"D \") {print $1 $2} else{print $1 $2}}' | grep -E \"$key_words\" | tee -a ${svn_update_list}\n\n\tdone\n\t\n}\n\nfunction get_uncheck_list(){\n\n\t\tgrep \"$webapp\" $1 | awk -F \"$webapp\" '{print $2}' >$2\n\t\t\n\t\tgrep \"$resources\" $1 | awk -F \"$resources\" '{print \"WEB-INF/classes/\" $2}' >>$2\n\t\t\n\t\tgrep \"$java\" $1 | awk -F \"$java\" '{print \"WEB-INF/classes/\" $2}' >>$3\n\n}\n\nfunction get_check_list() {\n\n\tfor mr in ${META_resources}; do\n\t\n\t\tsed -i \"s#${META_resources_path}${mr}#${mr}#g\" $1\n\t\t\n\tdone\n\n}\n\n\nfunction get_delete_file_list() {\n\n\tgrep \"D \" ${svn_update_list} | tee ibm-partialapp-delete\n\n\tif [ -s ibm-partialapp-delete ]; then\n\t\n\t\techo -e \"-------------------------there is $red deleted files $close ABOVE------------------------------------\"\n\n\t\tsed -i.bak \"/D /d\" ${svn_update_list}\n\t\t\n\t\tget_uncheck_list ibm-partialapp-delete ibm-partialapp-delete.props ibm-partialapp-delete.props\n\n\t\tsed -i \"s/\\.java/\\.class/g\" ibm-partialapp-delete.props\n\t\t\n\t\t###有bug,若删除的java有内部类,则需从未删除(上一天)的war中获取,不想写了。\n\n\n\t\tget_check_list ibm-partialapp-delete.props\n\n\t\tcp ibm-partialapp-delete.props ${mother_path}META-INF\n\t\t\n\tfi\n\t\n\trm ibm-partialapp-delete\n\n}\n\nfunction get_add_or_update_file_list() {\n\n\tget_uncheck_list ${svn_update_list} ${update_file_list} ${java_list}\n\n\tcd ${mother_path}\n\t\n\tawk -F \"[.]\" '{system(\"ls \" $1 \".class\")}{system(\"ls \" $1 
\"\\\\$*.class\")}' ${base_path}/${java_list} >>${base_path}/${update_file_list} 2>/dev/null\n\n\tcd ${base_path}\n\t\n\tget_check_list ${update_file_list}\n\t\n\tif [ -s ibm-partialapp-delete.props ]; then\n\t\n\t\twc ibm-partialapp-delete.props\n\t\t\n\t\techo \"META-INF/ibm-partialapp-delete.props\" >>${update_file_list}\n\t\t\n\t\tmv ibm-partialapp-delete.props ${backup_path}\n\t\t\n\tfi\n\n\trm ${java_list}\n\t\n\twc ${svn_update_list}* ${update_file_list}\n\n\techo -e \"------------------ $red check the files numbers $close ----------------------\"\n\t\n\tgrep '\\$' ${update_file_list}\n\t\n}\n\nfunction get_today_jar_in_update_file_list(){\n\n cd $mother_path\n\t\n\tfind WEB-INF/lib/* -mtime 0 >> ${base_path}/${update_file_list}\n\n cd ${base_path}\n\n}\n\nfunction rsync_update_file() {\n\n\tmkdir -p ${rsync_path}\n\n\trsync --files-from=${update_file_list} ${mother_path} ${rsync_path}${war_file}\n\n\tmv ${svn_update_list}* ${update_file_list} ${backup_path}\n\n}\n\nfunction get_update_zip_file() {\n\n\tcd ${rsync_path}\n\t\n\t[ -f ${increment_zip} ] && rm -v ${increment_zip}\n\t\n\tzip -qr ${increment_zip} ./\n\t\n\tmv ${increment_zip} ${backup_path}\n\t\n\tcd ${base_path}\n\t\n}\n\nfunction deploy(){\n\n\tif [ ${NODE_LABELS} == \"master\" ]; then\n\t\n\t\twasadmin=\"/home/ap/was/AppServer/profiles/AppSrv01/bin/wsadmin.sh\"\n\t\n\telif [ ${NODE_LABELS} == \"98.9\" ]; then\n\t\n\t\twasadmin=\"/home/fb/AppServer/profiles/AppSrv01/bin/wsadmin.sh\"\n\t\n\tfi\n\n case $1 in\n\t\n\t\t#全量部署 \n ql)\n time $wasadmin -host $host -port $port -user $username -password $password -c '$AdminApp update '$appname' app {-operation update -contents '$mother_path'/../'$war_file' -contextroot '$contextroot' -usedefaultbindings -MapResRefToEJB{{'$war_file' .* '$war_file',WEB-INF/web.xml jdbc/UdmpJndiDataSource javax.sql.DataSource jdbc/UdmpJndiDataSource}{'$war_file' .* '$war_file',WEB-INF/web.xml jdbc/UdmpDataSource javax.sql.DataSource jdbc/UdmpDataSource }{'$war_file' .* '$war_file',WEB-INF/web.xml jdbc/UdmpCommonDataSource javax.sql.DataSource jdbc/UdmpCommonDataSource }}}'\n ;;\n\t\t\n\t\t#拆包全量部署 \n cbql)\n time $wasadmin -host $host -port $port -user $username -password $password -c '$AdminApp update '$appname' app {-operation update -contents '${backup_path}${war_file}' -contextroot '$contextroot' -usedefaultbindings -MapResRefToEJB{{'$war_file' .* '$war_file',WEB-INF/web.xml jdbc/UdmpJndiDataSource javax.sql.DataSource jdbc/UdmpJndiDataSource}{'$war_file' .* '$war_file',WEB-INF/web.xml jdbc/UdmpDataSource javax.sql.DataSource jdbc/UdmpDataSource }{'$war_file' .* '$war_file',WEB-INF/web.xml jdbc/UdmpCommonDataSource javax.sql.DataSource jdbc/UdmpCommonDataSource }}}'\n ;;\n\t\t\n\t\t#拆包增量部署 \n cbzl)\n if [ -s ${backup_path}${increment_zip} ] ; then \n\t\t\t\ttime $wasadmin -host $host -port $port -user $username -password $password -c '$AdminApp update '$appname' partialapp {-contents '${backup_path}${increment_zip}'}'\n\t\t\telse\n\t\t\t\techo -e \" $red ERR ,there is no $increment_zip -------------\"\n\t\t\tfi\n ;;\n *)\n echo \"Input error!!\"\n ;;\n esac\n\n}\n\n\nclean_backup_path\n\nunzip_jar\n\nget_unjar_full_war_file\n\nget_svn_update_list\n\nif [ -s ${svn_update_list} ]; then\n\n\tget_delete_file_list\n\t\n\tget_add_or_update_file_list\n\t\n\tget_today_jar_in_update_file_list\n\t\n\trsync_update_file\n\t\n\tget_update_zip_file\n\t\nelse\n\n\techo -e \"------------------------------ $red There must be a list file path in ${svn_update_list} 
$close-------------------------------\"\n\t\nfi\n\ndeploy $deploy\n\n" }, { "alpha_fraction": 0.465214341878891, "alphanum_fraction": 0.49824315309524536, "avg_line_length": 27.459999084472656, "blob_id": "bf11c5b815b19c18919f642fc2ad254396950284", "content_id": "5a05786a226502472778a2f7a68327e2f6afb729", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1423, "license_type": "no_license", "max_line_length": 162, "num_lines": 50, "path": "/get_svn_chage_file_by_date_or_time.sh", "repo_name": "arrheni/bat_to_bash", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho \"---------------para 1 system in ( qry nb pa css )--------------------------\"\necho \"---------------para 2 the day before today to update ----------------------\"\necho \"---------------need svn_url_qry.txt file to get list ----------------------\"\necho \"---------------------------------------------------------------------------\"\n\nif [ -z \"$1\" ] ;then \n\techo \"use the paragram need para at least 1 like get_svn_update_list.sh qry\"\n\texit 1\nelif [ $1 = \"qry\" -o $1 = \"nb\" -o $1 = \"pa\" -o $1 = \"css\" ] ; then\n\tupdate_system=$1\n\tsvn_url=svn_url_$1.txt\n\tif [ ! -r $svn_url ] ; then\n\t\techo \" $svn_url does not exit ~~~~~~~~~~~~\"\n\texit 1\n\tfi\nelse\n\techo \"support ( qry nb pa css ) system to update svn list \"\n\texit 1\nfi\n\n# start_date <= svn log file < end_date\n# start_time\n# st='2019-9-6T11:00' \n\nst=\"$(date +%F -d \"$2 days ago\")T11:00\"\n\n# end_time\n# et='2019-8-10T12:00'\n\net=\"$(date +%F)T12:00\"\net=\"$(date +%FT%R)\"\n\nd=$(date +%F)\nlist_file=\"list_${update_system}_$d.txt\"\n\necho \"get update file list from \" $st \" to \" $et \" svn change file \" $list_file\n\ncat /dev/null > $list_file\n\n#cat svn_url.txt |sed 's/[[:space:]]//g' | while read line\ncat $svn_url |sed 's/[[:space:]]//g' | while read line\n\ndo\n\turl=${line%||*}\n\tkey_words=${line#*||}\n\tsvn diff -r {$st}:{$et} $url --summarize |awk -F $url '/.+[.][^./]+$/ {if($1==\"D \") {print $1 $2} else{print $2}}' |grep -E \"$key_words\" |tee -a $list_file\n\ndone\n" }, { "alpha_fraction": 0.6677649617195129, "alphanum_fraction": 0.7040087580680847, "avg_line_length": 32.109092712402344, "blob_id": "c4b5f16527b6931f69971c022083744b85490c60", "content_id": "6b14dfd0ca1a44dcc7f0738a316bd944c8def9c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1979, "license_type": "no_license", "max_line_length": 194, "num_lines": 55, "path": "/Deploy_Merge_NB_UW.sh", "repo_name": "arrheni/bat_to_bash", "src_encoding": "UTF-8", "text": "#!/bin/bash\nq_dir=/home/ap/Incremental/NB_UW\n\ndate_dir=`date +%F` ##今日日期\nbackup_dir=$q_dir/backup/$date_dir/$Version\npublish_dir=$q_dir/publish/$date_dir/$Version\n#发布包备份路径\ntest -d $backup_dir|| mkdir -p $backup_dir\n#最终发布包路径\ntest -d $publish_dir || mkdir -p $publish_dir\n\ncp $q_dir/rsync.sh $publish_dir\n\nfunction getJar()\n{\ncat $q_dir/$date_dir/result|while read line\ndo\nscp [email protected]:/home/ap/was/AppServer/profiles/AppSrv01/installedApps/COREPTap1Cell01/local-webapp.ear/local-webapp-521.0.1.war/WEB-INF/lib/$line*.jar $backup_dir ##回归环境把包拽下来放到备份文件夹\n\n#scp [email protected]:/home/ap/was/AppServer/profiles/AppSrv01/installedApps/COREPTap1Cell01/local-webapp20190725.ear/local-webapp-521.0.1.war/WEB-INF/lib/$line*.jar $backup_dir ##临时分支\necho $line\n\ncp $backup_dir/$line*.jar $q_dir/$date_dir/WEB-INF/lib/$line*/ 
##把包复制到/home/ap/Incremental/NB_UW/WEB-INF/lib/下\ndone\n}\n\n#制作回归发布包\nfunction MergeCode(){\ncat $q_dir/$date_dir/result|while read line\ndo\ncd $q_dir/$date_dir/WEB-INF/lib/$line*/\narray=(com META-INF)\nfor var in ${array[@]};\ndo\nls -l $var > /dev/null 2>&1 #黑洞\nif [ $? -eq 0 ];then\n jar uvfM $line*.jar $var ##合包命令\nfi\ndone\n[[ ! -d $publish_dir/NB_UW/WEB-INF/lib ]] && mkdir -p $publish_dir/NB_UW/WEB-INF/lib\ncp $q_dir/$date_dir/WEB-INF/lib/$line*/$line*.jar $publish_dir/NB_UW/WEB-INF/lib ##复制最终发布包到/home/ap/Incremental/NB_UW/publish下\ndone\n}\n#将回归发布文件上传至FTP服务器\nfunction upPublishDoc(){\ncd $publish_dir\nftp_dir=\"/home/ftpuser/PublishPackage/`date +%Y%m%d`\"\nwget --ftp-user=ftpuser --ftp-password=user123 --no-proxy ftp://10.1.95.54/PublishPackage/`date +%Y%m%d`/version.txt\nMi_FTP_Version=$(cat version.txt)\nscp -r NB_UW [email protected]:$ftp_dir/$Mi_FTP_Version\nscp -r NB_UW [email protected]:/home/ap/Incremental/publish_script/jar\n}\ngetJar\nMergeCode\nupPublishDoc\n" }, { "alpha_fraction": 0.6654294729232788, "alphanum_fraction": 0.6887592673301697, "avg_line_length": 42.86046600341797, "blob_id": "8405ac78882e980ca764812676687991394f46f3", "content_id": "f573186bebf59aa6c8e03cd35555524162478144", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1886, "license_type": "no_license", "max_line_length": 562, "num_lines": 43, "path": "/update_websphere_ear.sh", "repo_name": "arrheni/bat_to_bash", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho AP1 pa clm cap\n\nhost=\"10.1.95.47\"\nsoapport=\"8883\"\nusername=\"wasadmin\"\npassword=\"~~~~~~\"\nap1_appname=\"local-webapp-0_5_5_2_war\"\ncontextroot=\"ls\"\nname_key=\"AP2*PA_CLM_CAP.ear\"\n\nAppSrv=\"AppSrv02\"\napp_path=\"/home/ap/was/AppServer/profiles/${AppSrv}\"\nwsadmin=${app_path}/bin/wsadmin.sh\near_path=\"${app_path}/logs/\"\near_file=`find ${ear_path} -name ${name_key} -a -mtime -1 | sort -r | head -1 `\n\n\nupdate_ap1() {\n earfile=`basename ${ear_file}`\n\techo \"update AP1 from AP2 ${ear_file}\"\t\n\t#${wsadmin} -host ${host} -port ${soapport} -user $username -password $password -c '$AdminApp update '$ap1_appname' app {-operation update -contents '${ear_file}' -contextroot '$contextroot' -usedefaultbindings -MapResRefToEJB{{'$earfile' .* '$earfile',WEB-INF/web.xml jdbc/UdmpJndiDataSource javax.sql.DataSource jdbc/UdmpJndiDataSource}{'$earfile' .* '$earfile',WEB-INF/web.xml jdbc/UdmpDataSource javax.sql.DataSource jdbc/UdmpDataSource}{'$earfile' .* '$earfile',WEB-INF/web.xml jdbc/UdmpCommonDataSource javax.sql.DataSource jdbc/UdmpCommonDataSource}}}'\n\techo ${wsadmin} -lang jython -host ${host} -port ${soapport} -user $username -password $password -c \"AdminApplication.updateApplicationUsingDefaultMerge(\"${ap1_appname}\", \"${ear_file}\")\"\n time ${wsadmin} -lang jython -host ${host} -port ${soapport} -user $username -password $password -c \"AdminApplication.updateApplicationUsingDefaultMerge(\\\"${ap1_appname}\\\", \\\"${ear_file}\\\")\"\n ls -lrth ../../AppSrv04/installedApps/qrytap0Node04Cell/local-webapp-0_5_5_2_war.ear/local-webapp-0.5.5.3.war/WEB-INF/lib/ | tail\n}\n\nif [ -f \"${ear_file}\" ];then\necho ==== use ear file === $ear_file\n\tupdate_ap1\nelse\n\techo \"there is no ear file in ${ear_path}\"\n\texit\nfi\n\nold_ear_file=`find $ear_path -name \"*.ear\" -a -mtime +30 | tr '\\n' ' ' `\nif [ -n \"$old_ear_file\" ]\n\nthen \n echo \"rm $old_ear_file\"\n rm -v $old_ear_file\nfi\n" }, { "alpha_fraction": 0.569779634475708, "alphanum_fraction": 
0.5876180529594421, "avg_line_length": 21.690475463867188, "blob_id": "a4144135b7d76ca54a7ab5d70b3f97397de9e31", "content_id": "62cc3222da0f913f6e7b3958caebacb8faf45da8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1027, "license_type": "no_license", "max_line_length": 68, "num_lines": 42, "path": "/python3_check_websites.py", "repo_name": "arrheni/bat_to_bash", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport urllib.request\nimport urllib.error\nimport time\n \nopener = urllib.request.build_opener()\nopener.addheaders = [('User-agent', 'Mozilla/5.0')]\nfile = open('addr.txt')\nlines = file.readlines()\naa=[]\nprint('--------------------------starting first check--------------------------')\nfor line in lines:\n\tif line.find('http')!=(-1):\n\t\tindex=line.index('http')\n\t\turl=line[index:].replace('\\n','')\n\t\ttry:\n\t\t\tcode=urllib.request.urlopen(url).getcode()\n\t\t\tprint(url+'\\tstatus code is: '+str(code))\n\t\texcept urllib.error.HTTPError:\n\t\t\tprint(url+'\\tunreachable ')\n\t\texcept urllib.error.URLError:\n\t\t\tprint(url+'\\tunreachable ')\n\telse:\n\t\tcontinue\n\t\n\taa.append(url)\n \ntime.sleep(2)\n \nprint('--------------------------starting second check--------------------------')\nfor a in aa:\n\ttempUrl = a\n\ttry:\n\t\topener.open(tempUrl)\n\t\tprint(tempUrl+'\\tOK')\n\texcept urllib.error.HTTPError:\n\t\tprint(tempUrl+'\\terror accessing page')\n\t\ttime.sleep(2)\n\texcept urllib.error.URLError:\n\t\tprint(tempUrl+'\\terror accessing page')\n\t\ttime.sleep(2)\n\ttime.sleep(0.1)\n" }, { "alpha_fraction": 0.6451612710952759, "alphanum_fraction": 0.6733871102333069, "avg_line_length": 33.06666564941406, "blob_id": "c40d6db2f8ef878799fd378320ab1693aba124e8", "content_id": "8668ba20a0c5d485c1ee1d3257e1809e3a384bc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 496, "license_type": "no_license", "max_line_length": 102, "num_lines": 15, "path": "/backup_websphere_ear.sh", "repo_name": "arrheni/bat_to_bash", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nbackup_ear=\"AP2_95.47_`date +%Y%m%d-%H%M%S`_QRY.ear\"\ntarget_path=\"/home/ap/was/AppServer/profiles/AppSrv03/logs\"\nsource_path=\"/home/ap/was/AppServer/profiles/AppSrv03/installedApps/qrytap0Node03Cell/qry-web.ear\"\n\n./EARExpander.sh -ear ${target_path}/${backup_ear} -operationDir ${source_path} -operation collapse\n\nold_ear_file=`find $target_path -name \"*.ear\" -a -mtime +30 | tr '\\\\n' ' ' `\nif [ -n \"$old_ear_file\" ]\n\nthen \n echo \"rm $old_ear_file\"\n rm -v $old_ear_file\nfi\n" } ]
10
fonsp/the-wright-brothers
https://github.com/fonsp/the-wright-brothers
815efc5b61b8e03a529ca6fa0c5e15a3fb35e1bb
c504ee969909282d5ed628b8149a0d52b6202a94
01cf938e3b20bb4f46090a52876a5c71c8da34fc
refs/heads/master
2022-04-12T21:35:05.869929
2020-03-06T13:36:25
2020-03-06T13:36:25
112,214,641
0
0
null
2017-11-27T15:37:15
2019-06-11T06:00:54
2019-06-11T23:32:32
Python
[ { "alpha_fraction": 0.7426900863647461, "alphanum_fraction": 0.7426900863647461, "avg_line_length": 33.20000076293945, "blob_id": "876f459f86442b878e4a188349a35890be752242", "content_id": "916e8846cfb7d50fe78fa2a1317dd544b0250b54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 171, "license_type": "no_license", "max_line_length": 133, "num_lines": 5, "path": "/README.md", "repo_name": "fonsp/the-wright-brothers", "src_encoding": "UTF-8", "text": "# the-wright-brothers\n\n[![Build Status](https://travis-ci.com/fonsp/the-wright-brothers.svg?branch=master)](https://travis-ci.com/fonsp/the-wright-brothers)\n\n**FONZYYYY**\n" }, { "alpha_fraction": 0.5872150659561157, "alphanum_fraction": 0.5996035933494568, "avg_line_length": 28.676469802856445, "blob_id": "784904905582b7538a7b809de43b053bc85e477e", "content_id": "9aa2552b773a67f840d22129de4330d09ff2c75a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2018, "license_type": "no_license", "max_line_length": 103, "num_lines": 68, "path": "/wekkertje/checker.py", "repo_name": "fonsp/the-wright-brothers", "src_encoding": "UTF-8", "text": "from functools import reduce\n\n\ndef check(verbose=False):\n\n # %%\n\n with open(\"geheimwoordje.txt\", mode=\"r\") as f:\n geheimwoordje_text = f.readline()\n with open(\"sleutel.txt\", mode=\"r\") as f:\n sleutel_text = f.readline()\n with open(\"antwoord.txt\", mode=\"r\") as f:\n antwoord_text = f.readline()\n\n # %%\n\n # geheimwoordje.txt bevat:\n # een of meerdere woorden; leestekens en spaties worden genegeerd, alle letters worden hoofdletters\n toegestaan = \"abcdefghijklmnopqrstuvwxyz\"\n geheimwoordje_verwerkt = \"\".join(char for char in geheimwoordje_text.lower() if char in toegestaan)\n if verbose:\n print(\"Verwerkt: {}\".format(geheimwoordje_verwerkt))\n\n # toets 0 1 2 3 4 5 6 7 8 9\n nokia = [\"\", \"\", \"abc\", \"def\", \"ghi\", \"jkl\", \"mno\", \"pqrs\", \"tuv\", \"wxyz\"]\n welke_toets = lambda char: next(toets for toets, letters in enumerate(nokia) if char in letters)\n\n geheimwoordje_gecodeerd = list(map(welke_toets, geheimwoordje_verwerkt))\n\n geheime_code = reduce(lambda x, y: 10 * x + y, geheimwoordje_gecodeerd)\n if verbose:\n print(\"Geheime code: {}\".format(geheime_code))\n\n # Bijvoorbeeld:\n # -------\n # geheimwoordje_text == \"app EL!\"\n # ->\n # geheimwoordje_verwerkt == \"appel\"\n # ->\n # geheimwoordje_gecodeerd == [2, 7, 7, 3, 5]\n # ->\n # geheime_code == 27735\n\n # %%\n\n # sleutel.txt bevat:\n # twee ints, gescheiden door een komma of spatie\n sleutel = [int(s) for s in sleutel_text.replace(\",\", \" \").split(\" \")]\n assert(len(sleutel) == 2)\n\n antwoord_moet_zijn = (geheime_code * sleutel[0]) % sleutel[1]\n if verbose:\n print(\"Antwoord moet zijn: {}\".format(antwoord_moet_zijn))\n\n # %%\n\n # antwoord.txt bevat:\n # een int\n\n antwoord_gegeven = int(antwoord_text)\n if verbose:\n print(\"Antwoord gegeven: {}\".format(antwoord_gegeven))\n\n return antwoord_gegeven == antwoord_moet_zijn\n\n\nif __name__ == \"__main__\":\n assert(check(True))\n" }, { "alpha_fraction": 0.5925925970077515, "alphanum_fraction": 0.5965608358383179, "avg_line_length": 29.239999771118164, "blob_id": "0354d8c0e9fdd9afcb5f6d511c678c2fa8fb9c35", "content_id": "573f8e19a82bb30a4891a2487e8ef67bb31a0e62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 756, "license_type": "no_license", "max_line_length": 89, 
"num_lines": 25, "path": "/wekkertje/wekker.py", "repo_name": "fonsp/the-wright-brothers", "src_encoding": "UTF-8", "text": "import subprocess\nimport time\nfrom checker import check\nfrom morse import play_text, lampuit\n\n\ndef refresh_git(also_do_hard_reset=True):\n if also_do_hard_reset:\n subprocess.call(\"git reset --hard\", shell=True, stdout=subprocess.PIPE, cwd=\"..\")\n subprocess.call(\"git pull\", shell=True, stdout=subprocess.PIPE, cwd=\"..\")\n\n\nif __name__ == \"__main__\":\n while True:\n refresh_git()\n if check():\n lampuit()\n # 1 minuutje wachten tot de volgende pull\n # te vaak pullen wordt misschien geblokkeerd\n time.sleep(60)\n else:\n with open(\"geheimwoordje.txt\") as f:\n geheimwoordje_text = f.readline()\n play_text(geheimwoordje_text)\n lampuit()\n" }, { "alpha_fraction": 0.33893558382987976, "alphanum_fraction": 0.3523809611797333, "avg_line_length": 26.461538314819336, "blob_id": "49fed851e2fc8f45745218a681d97ca44bb17d41", "content_id": "020f1fdb6f04cd5dcaa4c8944238b35df1feaadf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1785, "license_type": "no_license", "max_line_length": 66, "num_lines": 65, "path": "/wekkertje/morse.py", "repo_name": "fonsp/the-wright-brothers", "src_encoding": "UTF-8", "text": "import onionGpio\nimport time\nimport sys\n\ng = onionGpio.OnionGpio(11)\ng.setOutputDirection(0)\n\nunit_length = .1\n\nlampaan = lambda: g.setValue(0)\nlampuit = lambda: g.setValue(1)\n\n#lampaan = lambda: print(\"1\")\n#lampuit = lambda: print(\"0\")\n\n# From https://www.geeksforgeeks.org/morse-code-translator-python/\n# by Palash Nigam\nMORSE_CODE_DICT = { 'A':'.-', 'B':'-...',\n 'C':'-.-.', 'D':'-..', 'E':'.',\n 'F':'..-.', 'G':'--.', 'H':'....',\n 'I':'..', 'J':'.---', 'K':'-.-',\n 'L':'.-..', 'M':'--', 'N':'-.',\n 'O':'---', 'P':'.--.', 'Q':'--.-',\n 'R':'.-.', 'S':'...', 'T':'-',\n 'U':'..-', 'V':'...-', 'W':'.--',\n 'X':'-..-', 'Y':'-.--', 'Z':'--..',\n '1':'.----', '2':'..---', '3':'...--',\n '4':'....-', '5':'.....', '6':'-....',\n '7':'--...', '8':'---..', '9':'----.',\n '0':'-----', ',':'--..--', '.':'.-.-.-',\n '?':'..--..', '/':'-..-.', '-':'-....-',\n '(':'-.--.', ')':'-.--.-', ' ':' '}\n\n\ndef beep(num_units):\n lampaan()\n time.sleep(unit_length*num_units)\n lampuit()\n\n\ndef pause(num_units):\n time.sleep(unit_length*num_units)\n\n\ndef play_letter(letter):\n for morse_symbol in MORSE_CODE_DICT[letter]:\n if morse_symbol == '.':\n beep(1)\n elif morse_symbol == '-':\n beep(3)\n elif morse_symbol == ' ':\n pause(1)\n pause(1)\n\n\ndef play_text(text):\n text = [c for c in str.upper(text) if c in MORSE_CODE_DICT]\n for letter in text:\n play_letter(letter)\n pause(3-1)\n\n\nif __name__ == \"__main__\":\n for line in sys.stdin:\n play_text(line)\n" } ]
4
moming3805/python_douyin
https://github.com/moming3805/python_douyin
5255ba4caf2098f2a2d1fc62be39e1b150866128
b69c069e8944c42045a615da5ac2fe581d3e442e
6f7addb4aa691a343daf421a0a7dfe8556fc7a81
refs/heads/master
2023-09-01T05:49:26.910082
2021-10-09T10:33:29
2021-10-09T10:33:29
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.618747353553772, "alphanum_fraction": 0.6393442749977112, "avg_line_length": 31.148649215698242, "blob_id": "5e6fcb520cb712ab6387c315ce188799dd51d0db", "content_id": "52836265103b1163ea4593de08e23e9c4766e14d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2977, "license_type": "no_license", "max_line_length": 95, "num_lines": 74, "path": "/douyin.py", "repo_name": "moming3805/python_douyin", "src_encoding": "UTF-8", "text": "# 滑块验证码处理\n# 1. web 自动化显示验证码\n# 2. 获取滑块以及滑块图片地址\n# 3. 下载滑块以及滑块图片\n# 4. 人工智能匹配滑块验证码距离\n# 5. 缩放比例以及校准滑块偏移量\n# 6. ActionChanis滑块解锁\n# 7. 人工智能模拟和跟踪滑块滑动轨迹\n# 8. 滑动失败后增加重试机制\n\n\n# 1. web 自动化显示验证码\nimport time\n\nimport cv2\nimport requests as requests\nfrom selenium import webdriver\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom PIL import Image as image\ndriver = webdriver.Chrome('chromedriver.exe')\nwhile True:\n driver.get('https://www.douyin.com/search/云南?source=search_history&type=user')\n time.sleep(2)\n # 2. 获取滑块以及滑块图片地址\n big_ele = driver.find_element(By.XPATH, '//*[@id=\"captcha-verify-image\"]')\n big_url = big_ele.get_attribute('src')\n print(\"big_url:\" + big_url)\n small_ele = driver.find_element(By.XPATH, '//*[@id=\"captcha_container\"]/div/div[2]/img[2]')\n small_url = small_ele.get_attribute('src')\n print('small_url:' + small_url)\n # 3. 下载滑块以及滑块图片\n with open('img/big_img.jpeg', 'wb') as f:\n f.write(requests.get(big_url).content)\n f.close()\n\n with open('img/small_img.png', 'wb') as f:\n f.write(requests.get(small_url).content)\n f.close()\n # 4. 人工智能匹配滑块验证码距离\n big_gray = cv2.imread('img/big_img.jpeg', 0) # 以灰度模式加载图片\n small_gray = cv2.imread('img/small_img.png', 0) # 以灰度模式加载图片\n # 大图灰色模版和小图灰色模版比较\n res = cv2.matchTemplate(big_gray, small_gray, cv2.TM_CCORR_NORMED) # 匹配对象\n distance = cv2.minMaxLoc(res) # 匹配小图和大图最左边和最右边的结果\n print(distance)\n\n # 获取原图像素\n big_img_size = image.open('img/big_img.jpeg')\n big_w = big_img_size.width # 原图的宽\n big_h = big_img_size.height # 原图的高\n\n # 5. 缩放比例以及校准滑块偏移量 实际 340*212 原图 big_w * big_h\n x = distance[2][0] # 原图距离\n print(x) # x+5 = big_w\n x = int(x * 340 / big_w) # 缩放比例\n print(x)\n py = 5 - int(5 * 340 / big_w) # 偏移量\n print(py)\n x = x - py # 滑块的移动距离\n print(x)\n # 6. ActionChanis滑块解锁\n small_ele = driver.find_element(By.XPATH, '//*[@id=\"captcha_container\"]/div/div[2]/img[2]')\n action = ActionChains(driver) # 初始化一个鼠标对象\n action.click_and_hold(small_ele).perform() # 鼠标按住左键不动\n action.drag_and_drop_by_offset(small_ele, x, 0).perform() # 把滑块滑动到指定的坐标\n time.sleep(2)\n # 7. 人工智能模拟和跟踪滑块滑动轨迹\n\n # 8. 滑动失败后增加重试机制\n try:\n driver.find_element(By.XPATH, '//*[@id=\"secsdk-captcha-drag-wrapper\"]/div[2]')\n except Exception as e:\n break\n" } ]
1
rodrigobmg/ovj3
https://github.com/rodrigobmg/ovj3
6860c19f0469b8ddc51bee208b3c3a00e36af27d
ab92686b5d8f1eec98021acdd08688cfee651e7f
147fa24bc83a2f6952ce4ae2861f6d8973bbf528
refs/heads/master
2021-01-23T13:37:06.107967
2016-02-29T01:13:25
2016-02-29T01:13:25
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5316455960273743, "alphanum_fraction": 0.5443037748336792, "avg_line_length": 21.571428298950195, "blob_id": "b21002c7137ec834c8a62d988fd0a3e39d00d935", "content_id": "28e3086ed72750191296715c564512811830260e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 474, "license_type": "no_license", "max_line_length": 53, "num_lines": 21, "path": "/src/vnmrj/makevjlib.sh", "repo_name": "rodrigobmg/ovj3", "src_encoding": "UTF-8", "text": "#!/bin/ksh -p\n\nPRG=`whence java` >/dev/null 2>&1\n\nwhile [[ -h \"$PRG\" ]]; do\n ls=`/usr/bin/ls -ld \"$PRG\"`\n link=`/usr/bin/expr \"$ls\" : '^.*-> \\(.*\\)$'`\n if /usr/bin/expr \"$link\" : '^/' > /dev/null; then\n prg=\"$link\"\n else\n prg=\"`/usr/bin/dirname $PRG`/$link\"\n fi\n PRG=`whence \"$prg\"` > /dev/null 2>&1\ndone\n\njdir=`/usr/bin/dirname $PRG`\njdir2=`/usr/bin/dirname $jdir`\nJDK_DIR=$jdir2\n# echo $JDK_DIR\nexport JDK_DIR\nmake -f makevnmrj libvnmrj.so\n" }, { "alpha_fraction": 0.7238805890083313, "alphanum_fraction": 0.7313432693481445, "avg_line_length": 18.14285659790039, "blob_id": "1b0512ae82be9b496f59fcc7ce1bf66130fd5589", "content_id": "c81e1621f2c12832eacff59eb1af620ad720d242", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 134, "license_type": "no_license", "max_line_length": 66, "num_lines": 7, "path": "/README.md", "repo_name": "rodrigobmg/ovj3", "src_encoding": "UTF-8", "text": "# ovj3\n\n# NOTICE\n\n__This repository contains very old code and will NOT be updated. \n\n*This repository will be deleted. Do not fork.*\n" }, { "alpha_fraction": 0.597744345664978, "alphanum_fraction": 0.6052631735801697, "avg_line_length": 12.947368621826172, "blob_id": "bc1a4072c65459f2f908c642830c31ba7271b575", "content_id": "2ea0177c99b043d46abec2a464c501e076e1906a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 266, "license_type": "no_license", "max_line_length": 46, "num_lines": 19, "path": "/src/vjclient/makefile", "repo_name": "rodrigobmg/ovj3", "src_encoding": "UTF-8", "text": "all:; -(scons)\n\nbuild-local:; -(scons)\n\nclean:; -(scons -c)\n\ndiff-global:; -(git diff)\n\ndiff-local:; -(git diff .)\n\nGitk:; -(gitk --since=\"2 weeks ago\")\n\nPull:; -(git pull)\n\nPush:; -(git push)\n\nResolve-conflicts:; -(git mergetool -t kdiff3)\n\nStatus:; -(git status)\n\n" }, { "alpha_fraction": 0.5928659439086914, "alphanum_fraction": 0.6051660776138306, "avg_line_length": 18.829267501831055, "blob_id": "eb3f3536cfc051447de91eb3fe6dc2e656ddba5c", "content_id": "a1681f79ab0e33b26fe5eac6b9f87949bf9aba13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 813, "license_type": "no_license", "max_line_length": 59, "num_lines": 41, "path": "/src/p11/bin/S99scanlog", "repo_name": "rodrigobmg/ovj3", "src_encoding": "UTF-8", "text": "#!/bin/sh\n# \n# Agilent Technologies All Rights Reserved.\n# This software contains proprietary and confidential\n# information of Agilent Technologies and its contributors.\n# Use, disclosure and reproduction is prohibited without\n# prior consent.\n#\n#\n# S99scanlog.sh\n# Starting scanlog daemon, system V style\n\ncase \"$1\" in\n'start')\n if [ -r /usr/varian/sbin/scanlog ]\n\tthen\n echo 'starting scanlog daemon'\n\n /usr/varian/sbin/scanlog &\n\t\techo $! 
> /vnmr/adm/scanlogpid\n\t\tchmod 600 /vnmr/adm/scanlogpid\n fi\n ;;\n\n'stop')\n\tif [ -r /vnmr/adm/scanlogpid ]\n\tthen\n \tscl_id=`cat /vnmr/adm/scanlogpid`\n\t\tif [ x$scl_id != x ]\n\t\tthen\n\t\t /usr/bin/kill -9 $scl_id\n\t\tfi\n\tfi\n ;;\n\n*)\n echo \"Usage: $0 { start | stop }\"\n exit 1\n ;;\nesac\nexit 0\n" }, { "alpha_fraction": 0.46650123596191406, "alphanum_fraction": 0.5139233469963074, "avg_line_length": 26.266918182373047, "blob_id": "53ade531adb1176895b9de1784375935693ccd8d", "content_id": "fe5ff49db776f2ce9e1ff94c0eae7145e269228c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7254, "license_type": "no_license", "max_line_length": 78, "num_lines": 266, "path": "/src/ib/dn2f.c", "repo_name": "rodrigobmg/ovj3", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2015 University of Oregon\n *\n * You may distribute under the terms of either the GNU General Public\n * License or the Apache License, as specified in the README file.\n *\n * For more information, see the README file.\n */\n/* dn2f.f -- translated by f2c (version 20090411).\n You must link the resulting object file with libf2c:\n\ton Microsoft Windows system, link with libf2c.lib;\n\ton Linux or Unix systems, link with .../path/to/libf2c.a -lm\n\tor, if you install libf2c.a in a standard place, with -lf2c -lm\n\t-- in that order, at the end of the command line, as in\n\t\tcc *.o -lf2c -lm\n\tSource for libf2c is in /netlib/f2c/libf2c.zip, e.g.,\n\n\t\thttp://www.netlib.org/f2c/libf2c.zip\n*/\n\n#include \"f2c.h\"\n\n/* Table of constant values */\n\nstatic integer c__1 = 1;\n\n/* Subroutine */ int dn2f_(integer *n, integer *p, doublereal *x, S_fp calcr, \n\tinteger *iv, integer *liv, integer *lv, doublereal *v, integer *\n\tuiparm, doublereal *urparm, U_fp ufparm)\n{\n /* Initialized data */\n\n static doublereal hlim = .1;\n static doublereal negpt5 = -.5;\n static doublereal one = 1.;\n static doublereal zero = 0.;\n\n /* System generated locals */\n integer i__1, i__2;\n doublereal d__1, d__2;\n\n /* Local variables */\n static doublereal h__;\n static integer i__, k, d1;\n static doublereal h0;\n static integer n1, n2, r1, dk, nf, ng, rn;\n static doublereal xk;\n static integer j1k, dr1, rd1, iv1;\n extern /* Subroutine */ int drn2g_(doublereal *, doublereal *, integer *, \n\t integer *, integer *, integer *, integer *, integer *, integer *, \n\t integer *, doublereal *, doublereal *, doublereal *, doublereal *)\n\t , dn2rdp_(integer *, integer *, integer *, integer *, doublereal *\n\t , doublereal *), dv7scp_(integer *, doublereal *, doublereal *), \n\t divset_(integer *, integer *, integer *, integer *, doublereal *);\n\n\n/* *** MINIMIZE A NONLINEAR SUM OF SQUARES USING RESIDUAL VALUES ONLY.. */\n/* *** THIS AMOUNTS TO DN2G WITHOUT THE SUBROUTINE PARAMETER CALCJ. */\n\n/* *** PARAMETERS *** */\n\n/* /6 */\n/* INTEGER IV(LIV), UIPARM(1) */\n/* DOUBLE PRECISION X(P), V(LV), URPARM(1) */\n/* /7 */\n/* / */\n\n/* ----------------------------- DISCUSSION ---------------------------- */\n\n/* THIS AMOUNTS TO SUBROUTINE NL2SNO (REF. 1) MODIFIED TO CALL */\n/* DRN2G. */\n/* THE PARAMETERS FOR DN2F ARE THE SAME AS THOSE FOR DN2G */\n/* (WHICH SEE), EXCEPT THAT CALCJ IS OMITTED. INSTEAD OF CALLING */\n/* CALCJ TO OBTAIN THE JACOBIAN MATRIX OF R AT X, DN2F COMPUTES */\n/* AN APPROXIMATION TO IT BY FINITE (FORWARD) DIFFERENCES -- SEE */\n/* V(DLTFDJ) BELOW. 
DN2F USES FUNCTION VALUES ONLY WHEN COMPUT- */\n/* THE COVARIANCE MATRIX (RATHER THAN THE FUNCTIONS AND GRADIENTS */\n/* THAT DN2G MAY USE). TO DO SO, DN2F SETS IV(COVREQ) TO MINUS */\n/* ITS ABSOLUTE VALUE. THUS V(DELTA0) IS NEVER REFERENCED AND ONLY */\n/* V(DLTFDC) MATTERS -- SEE NL2SOL FOR A DESCRIPTION OF V(DLTFDC). */\n/* THE NUMBER OF EXTRA CALLS ON CALCR USED IN COMPUTING THE JACO- */\n/* BIAN APPROXIMATION ARE NOT INCLUDED IN THE FUNCTION EVALUATION */\n/* COUNT IV(NFCALL), BUT ARE RECORDED IN IV(NGCALL) INSTEAD. */\n\n/* V(DLTFDJ)... V(43) HELPS CHOOSE THE STEP SIZE USED WHEN COMPUTING THE */\n/* FINITE-DIFFERENCE JACOBIAN MATRIX. FOR DIFFERENCES IN- */\n/* VOLVING X(I), THE STEP SIZE FIRST TRIED IS */\n/* V(DLTFDJ) * MAX(ABS(X(I)), 1/D(I)), */\n/* WHERE D IS THE CURRENT SCALE VECTOR (SEE REF. 1). (IF */\n/* THIS STEP IS TOO BIG, I.E., IF CALCR SETS NF TO 0, THEN */\n/* SMALLER STEPS ARE TRIED UNTIL THE STEP SIZE IS SHRUNK BE- */\n/* LOW 1000 * MACHEP, WHERE MACHEP IS THE UNIT ROUNDOFF. */\n/* DEFAULT = MACHEP**0.5. */\n\n/* *** REFERENCE *** */\n\n/* 1. DENNIS, J.E., GAY, D.M., AND WELSCH, R.E. (1981), AN ADAPTIVE */\n/* NONLINEAR LEAST-SQUARES ALGORITHM, ACM TRANS. MATH. */\n/* SOFTWARE, VOL. 7, NO. 3. */\n\n/* *** GENERAL *** */\n\n/* CODED BY DAVID M. GAY. */\n\n/* +++++++++++++++++++++++++++ DECLARATIONS +++++++++++++++++++++++++++ */\n\n/* *** EXTERNAL SUBROUTINES *** */\n\n\n/* DIVSET.... PROVIDES DEFAULT IV AND V INPUT COMPONENTS. */\n/* DRN2G... CARRIES OUT OPTIMIZATION ITERATIONS. */\n/* DN2RDP... PRINTS REGRESSION DIAGNOSTICS. */\n/* DV7SCP... SETS ALL COMPONENTS OF A VECTOR TO A SCALAR. */\n\n/* *** LOCAL VARIABLES *** */\n\n\n/* *** IV AND V COMPONENTS *** */\n\n/* /6 */\n/* DATA COVREQ/15/, D/27/, DINIT/38/, DLTFDJ/43/, J/70/, MODE/35/, */\n/* 1 NEXTV/47/, NFCALL/6/, NFGCAL/7/, NGCALL/30/, NGCOV/53/, */\n/* 2 R/61/, REGD/67/, REGD0/82/, TOOBIG/2/, VNEED/4/ */\n/* /7 */\n/* / */\n /* Parameter adjustments */\n --x;\n --iv;\n --v;\n --uiparm;\n --urparm;\n\n /* Function Body */\n\n/* --------------------------------- BODY ------------------------------ */\n\n if (iv[1] == 0) {\n\tdivset_(&c__1, &iv[1], liv, lv, &v[1]);\n }\n iv[15] = -abs(iv[15]);\n iv1 = iv[1];\n if (iv1 == 14) {\n\tgoto L10;\n }\n if (iv1 > 2 && iv1 < 12) {\n\tgoto L10;\n }\n if (iv1 == 12) {\n\tiv[1] = 13;\n }\n if (iv[1] == 13) {\n\tiv[4] = iv[4] + *p + *n * (*p + 2);\n }\n drn2g_(&x[1], &v[1], &iv[1], liv, lv, n, n, &n1, &n2, p, &v[1], &v[1], &v[\n\t 1], &x[1]);\n if (iv[1] != 14) {\n\tgoto L999;\n }\n\n/* *** STORAGE ALLOCATION *** */\n\n iv[27] = iv[47];\n iv[61] = iv[27] + *p;\n iv[82] = iv[61] + *n;\n iv[70] = iv[82] + *n;\n iv[47] = iv[70] + *n * *p;\n if (iv1 == 13) {\n\tgoto L999;\n }\n\nL10:\n d1 = iv[27];\n dr1 = iv[70];\n r1 = iv[61];\n rn = r1 + *n - 1;\n rd1 = iv[82];\n\nL20:\n drn2g_(&v[d1], &v[dr1], &iv[1], liv, lv, n, n, &n1, &n2, p, &v[r1], &v[\n\t rd1], &v[1], &x[1]);\n if ((i__1 = iv[1] - 2) < 0) {\n\tgoto L30;\n } else if (i__1 == 0) {\n\tgoto L50;\n } else {\n\tgoto L100;\n }\n\n/* *** NEW FUNCTION VALUE (R VALUE) NEEDED *** */\n\nL30:\n nf = iv[6];\n (*calcr)(n, p, &x[1], &nf, &v[r1], &uiparm[1], &urparm[1], (U_fp)ufparm);\n if (nf > 0) {\n\tgoto L40;\n }\n iv[2] = 1;\n goto L20;\nL40:\n if (iv[1] > 0) {\n\tgoto L20;\n }\n\n/* *** COMPUTE FINITE-DIFFERENCE APPROXIMATION TO DR = GRAD. 
OF R *** */\n\n/* *** INITIALIZE D IF NECESSARY *** */\n\nL50:\n if (iv[35] < 0 && v[38] == zero) {\n\tdv7scp_(p, &v[d1], &one);\n }\n\n j1k = dr1;\n dk = d1;\n ng = iv[30] - 1;\n if (iv[1] == -1) {\n\t--iv[53];\n }\n i__1 = *p;\n for (k = 1; k <= i__1; ++k) {\n\txk = x[k];\n/* Computing MAX */\n\td__1 = abs(xk), d__2 = one / v[dk];\n\th__ = v[43] * max(d__1,d__2);\n\th0 = h__;\n\t++dk;\nL60:\n\tx[k] = xk + h__;\n\tnf = iv[7];\n\t(*calcr)(n, p, &x[1], &nf, &v[j1k], &uiparm[1], &urparm[1], (U_fp)\n\t\tufparm);\n\t++ng;\n\tif (nf > 0) {\n\t goto L70;\n\t}\n\th__ = negpt5 * h__;\n\tif ((d__1 = h__ / h0, abs(d__1)) >= hlim) {\n\t goto L60;\n\t}\n\tiv[2] = 1;\n\tiv[30] = ng;\n\tgoto L20;\nL70:\n\tx[k] = xk;\n\tiv[30] = ng;\n\ti__2 = rn;\n\tfor (i__ = r1; i__ <= i__2; ++i__) {\n\t v[j1k] = (v[j1k] - v[i__]) / h__;\n\t ++j1k;\n/* L80: */\n\t}\n/* L90: */\n }\n goto L20;\n\nL100:\n if (iv[67] > 0) {\n\tiv[67] = rd1;\n }\n dn2rdp_(&iv[1], liv, lv, n, &v[rd1], &v[1]);\n\nL999:\n return 0;\n\n/* *** LAST LINE OF DN2F FOLLOWS *** */\n} /* dn2f_ */\n\n" }, { "alpha_fraction": 0.562663197517395, "alphanum_fraction": 0.5882506370544434, "avg_line_length": 21.397661209106445, "blob_id": "e029f81983545ce40aa5f4712aad801fb1eb9cbc", "content_id": "9cae4e4ccafc07d59d7eef30f1564e767d64db77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3830, "license_type": "no_license", "max_line_length": 129, "num_lines": 171, "path": "/common/adm/acq/testControllers", "repo_name": "rodrigobmg/ovj3", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#############################\n# testControllers #\n# #\n# John Ryan #\n# [email protected] #\n# #\n# Version: 2.00 (13-Feb-15) #\n# #\n#############################\n\n# Version 1.00 (30-May-14)\n# Initial release\n#\n# Version 1.01 (30-May-14)\n# Now checks both the 172.16.0.x and 10.0.0.x subnets for controllers\n#\n# Version 2.00 (13-Feb-15)\n# Changed name from testControllers.sh to testControllers\n# Works for any IP address range\n# More user-friendly output\n# Checks for minimum master1, rfx and ddrx on direct drive systems\n# Checks for non-contiguous rfx or ddrx controllers\n\n#####################\n# START Start start #\n#####################\n\n# Check for root user\nUSERID=\"`/usr/bin/id | awk '{print $1}'`\"\nif [ ${USERID} != \"uid=0(root)\" ]\nthen\n\t echo -e \"\\nTo run this script you will need to be the system's root user.\\n\"\n\t exit 1\nfi\n\n# Look for wormhole in /etc/hosts and determine IP address range\nIPRANGE=`getent hosts wormhole | awk -F: '{split($1,octet,\".\"); printf(\"%d.%d.%d\\n\"),octet[1],octet[2],octet[3]}'`\n\nif [ \"x${IPRANGE}\" = \"x\" ]\nthen\n\techo -e \"\\nERROR: wormhole not found in /etc/hosts! Please run setacq first.\\n\"\n\texit 1\nfi\n\n# Determine which addresses are active\necho -n \"Determining which network addresses are active.\"\nCONTROLLERS=`cat /etc/hosts |grep \"^${IPRANGE}\" | awk '{print$2}'`\n\nINOVA=0\nMASTER=0\nRF=0\nDDR=0\nCURRENTRF=1\nCURRENTDDR=1\nMISSINGRF=\"\"\nMISSINGDDR=\"\"\nACTIVE=\"\"\n\nfor CONTROLLER in $CONTROLLERS\ndo\n\tcase ${CONTROLLER} in\n\tmaster1)\n\t\tping -W 1 -c 1 ${CONTROLLER} >/dev/null 2>&1\n\t\tif [ $? -eq 0 ]\n\t\tthen\n\t\t\tACTIVE=\"$ACTIVE $CONTROLLER\"\n\t\t\tMASTER=1\n\t\tfi\n\t\t;;\n\trf*)\n\t\tping -W 1 -c 1 ${CONTROLLER} >/dev/null 2>&1\n\t\tif [ $? 
-eq 0 ]\n\t\tthen\n\t\t\tACTIVE=\"${ACTIVE} ${CONTROLLER}\"\n\t\t\tRF=1\n\t\t\tTEMPRF=`echo ${CONTROLLER} | sed 's/^rf//'`\n\t\t\tif [ ${TEMPRF} -eq ${CURRENTRF} ]\n\t\t\tthen\n\t\t\t\t((++CURRENTRF))\n\t\t\telse\n\t\t\t\twhile [ ${TEMPRF} -ne ${CURRENTRF} ]\n\t\t\t\tdo\n\t\t\t\t\tMISSINGRF=\"${MISSINGRF} rf${CURRENTRF}\"\n\t\t\t\t\t((++CURRENTRF))\n\t\t\t\tdone\n\t\t\tfi\n\t\tfi\n\t\t;;\n\tddr*)\n\t\tping -W 1 -c 1 ${CONTROLLER} >/dev/null 2>&1\n\t\tif [ $? -eq 0 ]\n\t\tthen\n\t\t\tACTIVE=\"${ACTIVE} ${CONTROLLER}\"\n\t\t\tDDR=1\n\t\t\tTEMPDDR=`echo ${CONTROLLER} | sed 's/^ddr//'`\n\t\t\tif [ ${TEMPDDR} -eq ${CURRENTDDR} ]\n\t\t\tthen\n\t\t\t\t((++CURRENTDDR))\n\t\t\telse\n\t\t\t\twhile [ ${TEMPDDR} -ne ${CURRENTDDR} ]\n\t\t\t\tdo\n\t\t\t\t\tMISSINGDDR=\"${MISSINGDDR} ddr${CURRENTDDR}\"\n\t\t\t\t\t((++CURRENTDDR))\n\t\t\t\tdone\n\t\t\tfi\n\t\tfi\n\t\t;;\n\tinova)\n\t\tINOVA=1\n\t\tping -W 1 -c 1 ${CONTROLLER} >/dev/null 2>&1\n\t\tif [ $? -eq 0 ]\n\t\tthen\n\t\t\tACTIVE=\"${ACTIVE} ${CONTROLLER}\"\n\t\tfi\n\t\t;;\n\t\n\t*)\n\t\tping -W 1 -c 1 ${CONTROLLER} >/dev/null 2>&1\n\t\tif [ $? -eq 0 ]\n\t\tthen\n\t\t\tACTIVE=\"${ACTIVE} ${CONTROLLER}\"\n\t\tfi\n\t\t;;\n\tesac\n\techo -n .\ndone\necho\n\n# Check for master1, rfx and ddrx on direct drive systems\nif [ ${INOVA} -eq 0 ] && ( [ ${MASTER} -eq 0 ] || [ ${RF} -eq 0 ] || [ ${DDR} -eq 0 ] )\nthen\n\techo\n\tif [ ${MASTER} -eq 0 ]\n\tthen\n\t\techo \"WARNING: No master controller detected!\"\n\tfi\n\tif [ ${RF} -eq 0 ]\n\tthen\n\t\techo \"WARNING: No RF controller detected!\"\n\tfi\n\tif [ ${DDR} -eq 0 ]\n\tthen\n\t\techo \"WARNING: No DDR controller detected!\"\n\tfi\n\techo -e \"The minimum requirement for the console to boot is a master controller and a minimum of one RF and one DDR controller.\"\nfi\n\n# Warn if any rfx or ddrx are missing\nif [ \"x${MISSINGRF}\" != \"x\" ]\nthen\n\techo -e \"\\nWARNING: Your system has non-contiguous RF controllers.\"\n\techo \"Missing RF controllers:${MISSINGRF}\"\nfi\n\nif [ \"x${MISSINGDDR}\" != \"x\" ]\nthen\n\techo -e \"\\nWARNING: Your system has non-contiguous DDR controllers.\"\n\techo \"Missing DDR controllers:${MISSINGDDR}\"\nfi\n\n# Test communication\necho -e \"\\nActive addresses:$ACTIVE\\n\"\nfor CONTROLLER in $ACTIVE\ndo\n\techo -n \"$CONTROLLER : \"\n\tping -q -w 20 -c 5000 -f $CONTROLLER | grep received\ndone\n\necho \"Done.\"\n" }, { "alpha_fraction": 0.6520840525627136, "alphanum_fraction": 0.6579400897026062, "avg_line_length": 36.20512771606445, "blob_id": "51cdfaf84d678c5b05a4a075dd272322a0cf2556", "content_id": "7a705655c4642020e60c3ba802a45d16bdaced6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2903, "license_type": "no_license", "max_line_length": 118, "num_lines": 78, "path": "/src/inova/SConstruct", "repo_name": "rodrigobmg/ovj3", "src_encoding": "UTF-8", "text": "#\n\nimport os\nimport sys\nimport glob\nimport shutil\nsys.path.append(os.path.join(os.getcwd(), os.pardir, os.pardir, 'scripts'))\nimport myShutil # for better copytree()\nimport myShutilsym\n\n# The console software must be compiled on a Sun. 
Use the makemi script\n# to accomplish this.\n\n# we need to specify an absolute path so this SConstruct file\n# can be called from any other SConstruct file\ncwd = os.getcwd() \n\n\n# dirList = [ 'vxBoot.small',\n# 'vxBoot.big',\n# 'vxBootPPC.small',\n# 'vxBootPPC.big',\n# 'vxBoot.auto']\n\ndirList2 = [ 'maclib',\n 'parlib',\n 'templates']\n\n# define with absolute path where built files will be copied\n# consolePath = os.path.join(cwd, os.pardir, os.pardir, os.pardir,\n# 'console', 'inova','acq')\n# if not os.path.exists(consolePath):\n# os.makedirs(consolePath)\n\n#copy the directory\n# for i in dirList:\n# myShutil.copytree(i,consolePath,symlinks = False)\n# Execute('chmod 644 ' + consolePath + '/' + i + '/*')\n\n# Execute(Copy(os.path.join(consolePath,'tms320dsp.ram'),os.path.join(cwd,'tms320dsp.ram')))\n# Execute('cd '+consolePath+'; ln -s vxBoot.small vxBoot')\n# Execute('cd '+consolePath+'; ln -s vxBootPPC.small vxBootPPC')\n\nconsolePath = os.path.join(cwd, os.pardir, os.pardir, os.pardir,\n 'console', 'inova')\nif not os.path.exists(consolePath):\n os.makedirs(consolePath)\nfor i in dirList2:\n myShutil.copytree(i,consolePath,symlinks = False)\nconsolePath = os.path.join(cwd, os.pardir, os.pardir, os.pardir,\n 'console', 'inova','templates','vnmrj')\nExecute('cd '+consolePath+'; ln -s protocols_nn protocols')\n\nconsolePath = os.path.join(cwd, os.pardir, os.pardir, os.pardir,\n 'console', 'inova','user_templates')\nif not os.path.exists(consolePath):\n os.makedirs(consolePath)\nExecute(Copy(os.path.join(consolePath,'global'),os.path.join(cwd,'global')))\n\nddrconsolePath = os.path.join(cwd, os.pardir, os.pardir, os.pardir,\n 'console', 'ddr','user_templates')\nif not os.path.exists(ddrconsolePath):\n os.makedirs(ddrconsolePath)\nExecute(Copy(os.path.join(ddrconsolePath,'global'),os.path.join(cwd,'global')))\n\n#Biopack file\nconsolePath = os.path.join(cwd, os.pardir, os.pardir, os.pardir,\n 'console', 'inova','biopack','templates','vnmrj','interface')\nif not os.path.exists(consolePath):\n os.makedirs(consolePath)\nExecute(Copy(os.path.join(consolePath,'ExperimentSelector.xml'),os.path.join(cwd,'biopack','ExperimentSelector.xml')))\n\n# Make a copy for Mercury\n#consolePath = os.path.join(cwd, os.pardir, os.pardir, os.pardir,\n# 'console', 'mercury','acq')\n#if not os.path.exists(consolePath):\n# os.makedirs(consolePath)\n#Execute(Copy(os.path.join(consolePath,'tms320dsp.ram'),os.path.join(cwd,'tms320dsp.ram')))\n\n" }, { "alpha_fraction": 0.5288300514221191, "alphanum_fraction": 0.5362848043441772, "avg_line_length": 40.339622497558594, "blob_id": "d87a04178005868865152d30fee8f0e7dd1e5862", "content_id": "c17c6b211058dec19dee2dadadeef0ee4e28e21b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6573, "license_type": "no_license", "max_line_length": 75, "num_lines": 159, "path": "/src/acqproc/ACQPROC_strucs.h", "repo_name": "rodrigobmg/ovj3", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2015 University of Oregon\n *\n * You may distribute under the terms of either the GNU General Public\n * License or the Apache License, as specified in the README file.\n *\n * For more information, see the README file.\n */\n/*\n*/\n/*------------------------------------------------------------------------\n|\n|\tThe AcqProc Queue structure \n|\t This information is passed to the Acq. process by PSG.\n|\n| Modified Author Purpose\n| -------- ------ -------\n| 2/22/89 Greg B. 1. 
Added int CurrentElem and int CompleteElem to\n|\t\t\t struct _value for Resume Acquistion (RA)\n+-----------------------------------------------------------------------*/\nstruct _value { \n\t\t char *Acqfile;\t/* acqfile path */\n\t\t char *Codefile;\t/* acqqueue path */\n\t\t char *RF_Patfile;\t/* RF Obs. & Dec. pattern file */\n\t\t char *Gradfile;\t/* Gradient file */\n\t\t char *Xpanfile;\t/* Future Expansion file */\n\t\t char *MachineID;\t/* Host name of INET socket */\n\t\t int InetPort;\t/* Inter-Net Port Number */\n\t\t int InetPid;\t/* Inter-Net Port Process ID Number */\n\t\t int SuFlag;\t/* suflag sets go, shim, spin, etc.*/\n\t\t int ExpFlags;\t/* Experiment Flags */\n\t\t int AcqPos;\t/* index in Acq system,a status also */\n\t\t int HalID;\t/* Acq. system's Exper. ID 0-1024 */\n\t\t long DatSub; /* Date & Time of day of submission.*/\n\t\t long DatAct;\t/* Date & Time became active in Acq */\n\t\t long DatFin; /* Date & Time of day of completion.*/\n\t\t double ExpDuration;/* Approx. time duration of Exp (sec)*/\n\t unsigned long CurrentElem;\t/* Current Element, Exp to Start at (RA)*/\n\t unsigned long CompleteElem;/* Total Complete Elements of Exp. (RA) */\n\t\t};\ntypedef struct _value Value;\n\n/*---------------------------------------------------------------------\n|\tThis the acquisition information maintain by the Acq. process\n|\tand sent to the Acquisition display program for user consumption.\n|\tmod 6/8/88 to be consistent on both sun3 & sun4\n+---------------------------------------------------------------------*/\n#ifndef MAX_SHIMS_CONFIGURED\n#define MAX_SHIMS_CONFIGURED (48) /* Match define in hostAcqStructs.h */\n#endif\nstruct _acqstat {\n\n long AcqCT;\n long AcqCmpltTime;\n long AcqRemTime;\n long AcqDataTime;\n\t\t long AcqSuFlag;\n long AcqSpinSet;\n long AcqSpinAct;\n long AcqSpinSpeedLimit;\n short Acqstate;\n short AcqExpInQue;\n unsigned long AcqFidElem;\n short AcqLSDV;\n short AcqLockLevel;\n\t\t short AcqSpinSpan;\n\t\t short AcqSpinAdj;\n\t\t short AcqSpinMax;\n\t\t short AcqSpinActSp;\n\t\t short AcqSpinProfile;\n short AcqVTSet;\n short AcqVTAct;\n short AcqSample;\n char AcqUserID[10];\n char AcqExpID[11];\n \t\t char dummy;\n\t\t char probeId1[20];\n char gradCoilId[12];\n\t\t short AcqShimValues[MAX_SHIMS_CONFIGURED];\n \t\t short AcqShimSet;\n\t\t short AcqLockGain;\n\t\t short AcqLockPower;\n\t\t short AcqLockPhase;\n \t unsigned long AcqShortRfPower[4]; /* microwatts */\n \t unsigned long AcqShortRfLimit[4];\n \t unsigned long AcqLongRfPower[4];\n \t unsigned long AcqLongRfLimit[4];\n short AcqZone;\n short AcqRack;\n };\ntypedef struct _acqstat AcqStatBlock;\n\n/*---------------------------------------------------------------------\n|\tExperiment Data that relevent to each element (fid) during acquisition \n|\tThis is additional information on any experiments that have been\n|\tsent to acquisition and have not finshed yet (includes processing).\n+---------------------------------------------------------------------*/\nstruct _elemvar {\n\t unsigned long ElemNum;/* Element (FID) number of this data */\n\t\t long PCt;\t\t/* CT for this FID */\n\t\t long PGain;\t/* reciver gain for this FID */\n\t\t long PSpin;\t/* spinner rate for this FID */\n\t\t};\n\ntypedef struct _elemvar Expelemstruc;\n\n/*---------------------------------------------------------------------\n|\tExperiment Data that is used or updated during acquisition \n|\tThis is addition information on any experiments that have been\n|\tsent to acquisition and have not finshed 
yet (includes processing).\n|\n| Modified Author Purpose\n| -------- ------ -------\n| 2/22/89 Greg B. 1. Added long ExpLastElemSent to struct _expvar\n|\n+---------------------------------------------------------------------*/\nstruct _expvar {\n\t\t char *ExpFid;\n\t\t char *ExpCode;\n\t\t char *ExpProc;\n\t\t char *ExpAcq;\n\t\t char *ExpUserDir;\n\t\t Expelemstruc **ExpElem; /* pointer array for elemvar */\n\t\t long *ExpRF_Indx;\t/* Obs & Dec RF pattern index */\n\t\t long *ExpGradIndx;\t/* Gradient index */\n\t\t long *ExpXpanIndx;\t/* future expansion index */\n\t unsigned long NxtFid;\n\t unsigned long ExpLastElemSent; /* last element sent down the pipe */\n\t\t long ExpIndx[4];\t/* Code index (offset for each FID)*/\n\t\t long DataPtSiz;\t/* data point size in bytes */\n\t\t long FidSiz;\t/* total data size */\n\t\t long N_Pts;\t/* number of data points (np) */\n\t\t long N_Fids;\t/* number of fids (nf) */\n\t\t long N_Trans;\t/* number of transients (nt) */\n\t\t long N_Bs; /* transients per block size (bs) */\n\t\t short ExpWkNum;\t/* Experiment # to do processing */\n\t\t short ExpIlFlag;\t/* Experiment interleaving flag */\n\t\t int ExpProcMask;\t/* Experiment conditional processing mask */\n\t\t int ExpPipeSiz; /* Pipeline size of HAL for this Exp.*/\n\t\t int ExpInPipe;\t/* Exp. Element in Pipe */\n\t unsigned long ExpElemFin;\t/* Number of Exp. Element Finished */\n\t\t int ExpSuFlag;\t/* Experiment Setup type(alias of go)*/\n\t\t int ExpProcWait;\t/* Conditional Processing Flag */\n\t\t mode_t umask4Vnmr; /* umask as set by PSG */\n\t\t};\ntypedef struct _expvar Expparmstruc;\n\n/*---------------------------------------------------------------------\n|\tMessage packet from external task to AcqProc\n+---------------------------------------------------------------------*/\n\nstruct _messpacket {\n\t\t\tint CmdOption;\t\t/* command Option */\n\t\t\tchar Hostname[50];\t/* Who to reply to */\n\t\t\tint Port;\t\t/* Inter-Net Port Number */\n\t\t\tint PortPid;\t\t/* Inter-Net Port Process # */\n\t\t\tchar Message[256];\t/* message */\n\t\t };\ntypedef struct _messpacket messpacket;\n" }, { "alpha_fraction": 0.5644316077232361, "alphanum_fraction": 0.6083499193191528, "avg_line_length": 24.380733489990234, "blob_id": "1afb425e80db64ecf015e4c4534a3725f609cefc", "content_id": "8a7c11968713c1b7e10b5c2b12fb3e535e415d9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5533, "license_type": "no_license", "max_line_length": 74, "num_lines": 218, "path": "/src/biopack/psglib/satxfer1D.c", "repo_name": "rodrigobmg/ovj3", "src_encoding": "UTF-8", "text": "/*\n * Copyright (C) 2015 University of Oregon\n *\n * You may distribute under the terms of either the GNU General Public\n * License or the Apache License, as specified in the README file.\n *\n * For more information, see the README file.\n */\n/* 1D STD Saturation Transfer Difference experiment with\n\tsculpted suppression of solvent \n\tReference Mayer and Meyer J.A.Ch.Soc.2001,123,6108-6117\n\tFeatures included:\n\t\tRandomization of Magnetization prior to relaxation delay\n\t\t\tG-90-G\n\t\t\t[selected by sspul flag]\n\t\tSolvent suppression during relaxation and or detection \n\t\t\tperiod selected by satmode flag \t \n\t\n\n\t\t\n\tParamters:\n\t\tsspul :\t\ty - selects magnetization randomization option\n\t\thsglvl:\t\tHomospoil gradient level (DAC units)\n\t\thsgt\t:\tHomospoil gradient time\n\t\tsatmode\t:\ty - selects presaturation during 
relax\n\t\t\t\t\tdelay\n\t\tsatfrq\t:\tpresaturation frequency\n\t\tsatdly\t:\tpresaturation delay\n\t\tsatpwr\t:\tpresaturation power\n\t\tmix\t:\tT1rho spinlock mixing time\n\t\tslpwr\t:\tspin-lock power level\n\t\tslpw\t:\t90 deg pulse width for spinlock\n\t\td1\t:\trelaxation delay\n\t\tselfrq\t:\tfrequency (for selective 180) for signal\n\t\t\t inversion of protein at approx -2 ppm \t\t\n \t\tselfrq1\t:\tfrequency at 30ppm\n\t\tselpwr\t:\tPower of selective 180 pulse at -2 ppm \n\t\tselpw\t:\tSelective 180 deg pulse width -2 ppm\n\t\t\t\tduration ca 50 ms\n\t\tselshape:\tshape of selective 180 pulse at -2ppm\n\t\tselfrqs\t:\tsolvent frequency\n selpwrs :\tpower of 180 deg pulse for solvent suppression\n\t\tselpws\t:\tSelective 180 deg pulse width for solvent supp\n\t\t\t\tduration ca 3.1 ms\n\t\tselshapes : \tShape of 180 deg pulse for solvent supp.\n\t\tgzlvl1, gzlvl2 : Gradient levels during the DPFG echos\n\t\tgt1, gt1:\tGradient times during the DPFG echos\n\t\tgstab\t:\trecovery delay\n cycles1 :\tnumber of selective pulses for saturation\n\t\t\t\tof protein \n the selective inversion pulse is jumping between -2ppm and 30 ppm\n\n\tIgor Goljer June 9 2003\n\n*/\n\n\n#include <standard.h>\n\nstatic int\tph1[8] = {0,2,0,2,1,3,1,3},\n\t\tph5[8] = {2,0,2,0,3,1,3,1},\n\t\tph2[8] = {2,2,0,0,1,1,3,3},\n ph3[8] = {1,3,1,3,2,0,2,0},\n\t\tph4[8] = {0,0,0,0,1,1,1,1};\n\npulsesequence()\n{\n double\t hsglvl,\n\t\t hsgt,\n\t\t slpwr,\n\t\t slpw,\n\t\t mix,\n\t\t cycles,\n cycles1,\n\t\t gzlvl1,\n\t\t gt1,\n\t\t gzlvl2,\n\t\t gt2,\n\t\t satfrq,\n\t\t satdly,\n\t\t satpwr,\n\t\t gstab,\n\t\t selpwr, selpwrs, selfrqs,\n\t\t selfrq, selfrq1,\n\t\t selpws, selpw;\n char sspul[MAXSTR],\n\t\t selshape[MAXSTR], selshapes[MAXSTR],\t\n satmode[MAXSTR];\n\n hsglvl = getval(\"hsglvl\");\n hsgt = getval(\"hsgt\");\n cycles1 = getval(\"cycles1\");\n slpwr = getval(\"slpwr\");\n mix = getval(\"mix\");\n slpw = getval(\"slpw\");\n gzlvl1 = getval(\"gzlvl1\");\n gt1 = getval(\"gt1\");\n gzlvl2 = getval(\"gzlvl2\");\n gt2 = getval(\"gt2\");\n gstab =getval(\"gstab\");\n satfrq = getval(\"satfrq\");\n satpwr = getval(\"satpwr\");\n satdly = getval(\"satdly\");\n selpwr = getval(\"selpwr\");\n selpwrs = getval(\"selpwrs\");\n selfrq = getval(\"selfrq\");\n selfrq1 = getval(\"selfrq1\");\n selfrqs = getval(\"selfrqs\");\n selpw = getval(\"selpw\");\n selpws = getval(\"selpws\");\n getstr(\"selshape\",selshape);\n getstr(\"selshapes\",selshapes);\n getstr(\"sspul\", sspul);\n getstr(\"satmode\",satmode);\n\n cycles = (mix)/(4*slpw);\n initval(cycles,v9);\n initval(cycles1,v5);\n\n settable(t1,8,ph1);\n settable(t5,8,ph5);\n settable(t4,8,ph4);\n settable(t2,8,ph2);\n settable(t3,8,ph3);\n\n getelem(t4,ct,oph);\n getelem(t2,ct,v2);\n getelem(t3,ct,v3);\n/* add(v2,two,v3); \n add(v2,one,v3); */\n mod2(ct,v4); /* 0 1 0 1 0 1 0 1 ..frequency switch */\n\n/* BEGIN THE ACTUAL PULSE SEQUENCE */\n status(A);\n obspower(tpwr);\n delay(5.0e-5);\n\n if (sspul[0] == 'y')\n {\n\tzgradpulse(hsglvl,hsgt);\n\trgpulse(pw,zero,rof1,rof1);\n\tzgradpulse(hsglvl,hsgt);\n }\n\n delay(d1);\n\n if (satmode[0] == 'y') \n {\n obspower(satpwr);\n\tif (satfrq != tof)\n\tobsoffset(satfrq);\n\trgpulse(satdly,zero,rof1,rof1);\n\tif (satfrq != tof)\n\tobsoffset(tof);\n obspower(tpwr);\n }\n ifzero(v4);\n obsoffset(selfrq);\n elsenz(v4);\n obsoffset(selfrq1);\n endif(v4);\n /* Start the selective saturation of protein */ \n\n obspower(selpwr);\n if (cycles1 > 0.0)\n {\n starthardloop(v5);\n delay(0.0005);\n 
shaped_pulse(selshape,selpw,zero,rof1,rof1);\n delay(0.0005);\n endhardloop(); \n }\n\n obspower(tpwr);\n obsoffset(tof);\n delay(0.000001);\n status(B);\n rgpulse(pw, t1, rof1, rof2);\n /* spin lock pulse for dephasing of protein signals */\n obspower(slpwr);\n rcvroff();\n rgpulse(mix,v3,rof1,rof2);\n\n/* solvent suppression using excitation sculpting */\n\n if (satmode[1] == 'y') \n {\n zgradpulse(gzlvl1,gt1);\n delay(gstab);\n\tif (selfrqs != tof)\n\t obsoffset(selfrqs);\n obspower(selpwrs);\n\tshaped_pulse(selshapes,selpws,t1,rof1,rof1); \n obspower(tpwr);\n if (selfrqs != tof)\n obsoffset(tof);\n rgpulse(2*pw,t5,rof1,rof1);\n delay(gstab); \n zgradpulse(gzlvl1,gt1);\n delay(gstab); \n zgradpulse(gzlvl2,gt2);\n if (selfrqs != tof)\n obsoffset(selfrqs);\n obspower(selpwrs);\n delay(gstab); \n\tshaped_pulse(selshapes,selpws,t5,rof1,rof1); \n obspower(tpwr);\n if (selfrqs != tof)\n obsoffset(tof);\n rgpulse(2*pw,t1,rof1,rof1);\n delay(gstab); \n zgradpulse(gzlvl2,gt2);\n delay(gstab); \n }\n\trcvron();\n\n status(C);\n}\n" } ]
9
lsh950919/image-scrape
https://github.com/lsh950919/image-scrape
9c261454365f3ea222c777e2e4d0053da8a46176
1ec9ebed4852a44e6065b98454358ec9d9567407
df9c9fc503037361e0321c58006c8071f0ead78f
refs/heads/main
2023-08-25T21:38:01.436895
2021-10-06T09:23:12
2021-10-06T09:23:12
414,144,684
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5959596037864685, "alphanum_fraction": 0.608498752117157, "avg_line_length": 29.231578826904297, "blob_id": "d0a757e9382697f658bcfb01cc3775656fe4d4f9", "content_id": "4449050a187c3148aad269522fc42c4fdffb49b8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2871, "license_type": "permissive", "max_line_length": 146, "num_lines": 95, "path": "/scrape.py", "repo_name": "lsh950919/image-scrape", "src_encoding": "UTF-8", "text": "import time\nimport random\nimport shutil\nimport requests\nimport argparse\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options\n\nfrom bs4 import BeautifulSoup as bs\n\ndef driver(driver_path):\n options = Options()\n options.add_argument('--headless')\n options.add_argument('--disable-gpu')\n driver = webdriver.Chrome(driver_path, options = options)\n return driver\n\ndef search_pinterest(search_text):\n chrome = driver('chromedriver.exe')\n chrome.get('https://www.pinterest.com/pin/798122365205129040/')\n \n search = chrome.find_element_by_xpath('//*[@id=\"__PWS_ROOT__\"]/div/div/div/div[2]/div[1]/div/div[2]/div/div/form/div/div[1]/div[2]/div/input')\n search.send_keys(search_text + Keys.ENTER)\n time.sleep(random.random())\n\n html = chrome.page_source\n source = bs(html)\n\n # add scrolling algorithm following number of images to scrape\n # driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n urls = [block.get('src') for block in source.find_all('img') if block.get('src').startswith('https://i.pinimg.com')]\n driver.close()\n\n return urls\n\ndef search_google(search_text):\n chrome = driver('chromedriver.exe')\n chrome.get('https://www.google.com/imghp')\n\n search = chrome.find_element_by_xpath('//*[@id=\"sbtc\"]/div/div[2]/input')\n search.send_keys(search_text + Keys.ENTER)\n\n urls = []\n num = 1\n\n # add scraping for multiple images\n\n for i in range(num):\n img = driver.find_element_by_xpath(f'//*[@id=\"islrg\"]/div[1]/div[{i}]/a[1]/div[1]/img')\n img.click()\n time.sleep(random.random())\n\n image_url = driver.find_elements_by_class_name('n3VNCb')\n if image_url.get_attribute('src')[:4].lower() == 'http':\n urls.append(image_url)\n\n return urls\n\ndef download(url, save_dir):\n response = requests.get(url, stream = True)\n if response.status_code == 200:\n response.raw.decode_content = True\n\n with open(url.split('/')[-1], 'wb') as f:\n shutil.copyfileobj(response.raw, f'{save_dir}/f')\n print('image downloaded')\n else:\n print(\"could not retrieve image\")\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-t', \n '--text', \n default = None)\n parser.add_argument('-e', \n '--engine', \n default = 'google')\n parser.add_argument('-s', \n '--save_dir', \n default = './images')\n args = parser.parse_args()\n\n if args.engine == 'google':\n result = search_google(args.text)\n else:\n result = search_pinterest(args.text)\n\n for url in result:\n download(url, args.save_dir)\n \nif __name__ == '__main__':\n main()" } ]
1
aleph-im/aleph-client
https://github.com/aleph-im/aleph-client
461d32d12056501ac776fef6f13a00393ed8a47b
15b5841ad214ae46c7f7d8f9dd3080f232ca2587
50433f2aae963d9086c93969e10484c52bf9219b
refs/heads/master
2023-07-22T20:32:39.949822
2023-07-10T08:29:30
2023-07-10T08:29:30
200,687,355
9
15
MIT
2019-08-05T16:02:02
2023-07-09T03:23:28
2023-07-10T08:29:30
Python
[ { "alpha_fraction": 0.6506717801094055, "alphanum_fraction": 0.6540589332580566, "avg_line_length": 32.54924392700195, "blob_id": "282b3f73581abfaea7b33b705b69a3e02c03cf19", "content_id": "7e16b130cc17308b1aa1c97882fbc53e4e8a0b68", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8857, "license_type": "permissive", "max_line_length": 101, "num_lines": 264, "path": "/src/aleph_client/commands/message.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import json\nimport os.path\nimport subprocess\nimport tempfile\nfrom pathlib import Path\nfrom typing import Dict, List, Optional\n\nimport typer\nfrom aleph.sdk import AlephClient, AuthenticatedAlephClient\nfrom aleph.sdk.account import _load_account\nfrom aleph.sdk.conf import settings as sdk_settings\nfrom aleph.sdk.models import MessagesResponse\nfrom aleph.sdk.types import AccountFromPrivateKey, StorageEnum\nfrom aleph_message.models import AlephMessage, ItemHash, MessageType, ProgramMessage\n\nfrom aleph_client.commands import help_strings\nfrom aleph_client.commands.utils import (\n    colorful_json,\n    colorful_message_json,\n    input_multiline,\n    setup_logging,\n    str_to_datetime,\n)\n\napp = typer.Typer()\n\n\n@app.command()\ndef get(\n    item_hash: str,\n):\n    with AlephClient(api_server=sdk_settings.API_HOST) as client:\n        message = client.get_message(item_hash=ItemHash(item_hash))\n        typer.echo(colorful_message_json(message))\n\n\n@app.command()\ndef find(\n    pagination: int = 200,\n    page: int = 1,\n    message_type: Optional[str] = None,\n    content_types: Optional[str] = None,\n    content_keys: Optional[str] = None,\n    refs: Optional[str] = None,\n    addresses: Optional[str] = None,\n    tags: Optional[str] = None,\n    hashes: Optional[str] = None,\n    channels: Optional[str] = None,\n    chains: Optional[str] = None,\n    start_date: Optional[str] = None,\n    end_date: Optional[str] = None,\n    ignore_invalid_messages: bool = True,\n):\n    message_type = MessageType(message_type) if message_type else None\n\n    parsed_content_types: Optional[List[str]] = None\n    parsed_content_keys: Optional[List[str]] = None\n    parsed_refs: Optional[List[str]] = None\n    parsed_addresses: Optional[List[str]] = None\n    parsed_tags: Optional[List[str]] = None\n    parsed_hashes: Optional[List[str]] = None\n    parsed_channels: Optional[List[str]] = None\n    parsed_chains: Optional[List[str]] = None\n\n    parsed_content_types = content_types.split(\",\") if content_types else None\n    parsed_content_keys = content_keys.split(\",\") if content_keys else None\n    parsed_refs = refs.split(\",\") if refs else None\n    parsed_addresses = addresses.split(\",\") if addresses else None\n    parsed_tags = tags.split(\",\") if tags else None\n    parsed_hashes = hashes.split(\",\") if hashes else None\n    parsed_channels = channels.split(\",\") if channels else None\n    parsed_chains = chains.split(\",\") if chains else None\n\n    message_type = MessageType(message_type) if message_type else None\n\n    start_time = str_to_datetime(start_date)\n    end_time = str_to_datetime(end_date)\n\n    with AlephClient(api_server=sdk_settings.API_HOST) as client:\n        response: MessagesResponse = client.get_messages(\n            pagination=pagination,\n            page=page,\n            message_type=message_type,\n            content_types=parsed_content_types,\n            content_keys=parsed_content_keys,\n            refs=parsed_refs,\n            addresses=parsed_addresses,\n            tags=parsed_tags,\n            hashes=parsed_hashes,\n            channels=parsed_channels,\n            chains=parsed_chains,\n            start_date=start_time,\n            end_date=end_time,\n            
ignore_invalid_messages=ignore_invalid_messages,\n        )\n    typer.echo(colorful_json(response.json(sort_keys=True, indent=4)))\n\n\n@app.command()\ndef post(\n    path: Optional[Path] = typer.Option(\n        None,\n        help=\"Path to the content you want to post. If omitted, you can input your content directly\",\n    ),\n    type: str = typer.Option(\"test\", help=\"Text representing the message object type\"),\n    ref: Optional[str] = typer.Option(None, help=help_strings.REF),\n    channel: Optional[str] = typer.Option(default=None, help=help_strings.CHANNEL),\n    private_key: Optional[str] = typer.Option(\n        sdk_settings.PRIVATE_KEY_STRING, help=help_strings.PRIVATE_KEY\n    ),\n    private_key_file: Optional[Path] = typer.Option(\n        sdk_settings.PRIVATE_KEY_FILE, help=help_strings.PRIVATE_KEY_FILE\n    ),\n    debug: bool = False,\n):\n    \"\"\"Post a message on aleph.im.\"\"\"\n\n    setup_logging(debug)\n\n    account: AccountFromPrivateKey = _load_account(private_key, private_key_file)\n    storage_engine: StorageEnum\n    content: Dict\n\n    if path:\n        if not path.is_file():\n            typer.echo(f\"Error: File not found: '{path}'\")\n            raise typer.Exit(code=1)\n\n        file_size = os.path.getsize(path)\n        storage_engine = (\n            StorageEnum.ipfs if file_size > 4 * 1024 * 1024 else StorageEnum.storage\n        )\n\n        with open(path, \"r\") as fd:\n            content = json.load(fd)\n\n    else:\n        content_raw = input_multiline()\n        storage_engine = (\n            StorageEnum.ipfs\n            if len(content_raw) > 4 * 1024 * 1024\n            else StorageEnum.storage\n        )\n        try:\n            content = json.loads(content_raw)\n        except json.decoder.JSONDecodeError:\n            typer.echo(\"Not valid JSON\")\n            raise typer.Exit(code=2)\n\n    with AuthenticatedAlephClient(\n        account=account, api_server=sdk_settings.API_HOST\n    ) as client:\n        result, status = client.create_post(\n            post_content=content,\n            post_type=type,\n            ref=ref,\n            channel=channel,\n            inline=True,\n            storage_engine=storage_engine,\n        )\n\n    typer.echo(json.dumps(result.dict(), indent=4))\n\n\n@app.command()\ndef amend(\n    item_hash: str = typer.Argument(..., help=\"Hash reference of the message to amend\"),\n    private_key: Optional[str] = typer.Option(\n        sdk_settings.PRIVATE_KEY_STRING, help=help_strings.PRIVATE_KEY\n    ),\n    private_key_file: Optional[Path] = typer.Option(\n        sdk_settings.PRIVATE_KEY_FILE, help=help_strings.PRIVATE_KEY_FILE\n    ),\n    debug: bool = False,\n):\n    \"\"\"Amend an existing aleph.im message.\"\"\"\n\n    setup_logging(debug)\n\n    account: AccountFromPrivateKey = _load_account(private_key, private_key_file)\n\n    with AlephClient(api_server=sdk_settings.API_HOST) as client:\n        existing_message: AlephMessage = client.get_message(item_hash=item_hash)\n\n    editor: str = os.getenv(\"EDITOR\", default=\"nano\")\n    with tempfile.NamedTemporaryFile(suffix=\"json\") as fd:\n        # Fill in message template\n        fd.write(existing_message.content.json(indent=4).encode())\n        fd.seek(0)\n\n        # Launch editor\n        subprocess.run([editor, fd.name], check=True)\n\n        # Read new message\n        fd.seek(0)\n        new_content_json = fd.read()\n\n        content_type = type(existing_message).__annotations__[\"content\"]\n        new_content_dict = json.loads(new_content_json)\n        new_content = content_type(**new_content_dict)\n\n        if isinstance(existing_message, ProgramMessage):\n            new_content.replaces = existing_message.item_hash\n        else:\n            new_content.ref = existing_message.item_hash\n\n        typer.echo(new_content)\n        with AuthenticatedAlephClient(\n            account=account, api_server=sdk_settings.API_HOST\n        ) as client:\n            message, _status = client.submit(\n                content=new_content.dict(),\n                message_type=existing_message.type,\n                
channel=existing_message.channel,\n            )\n            typer.echo(f\"{message.json(indent=4)}\")\n\n\n@app.command()\ndef forget(\n    hashes: str = typer.Argument(\n        ..., help=\"Comma separated list of hash references of messages to forget\"\n    ),\n    reason: Optional[str] = typer.Option(\n        None, help=\"A description of why the messages are being forgotten.\"\n    ),\n    channel: Optional[str] = typer.Option(default=None, help=help_strings.CHANNEL),\n    private_key: Optional[str] = typer.Option(\n        sdk_settings.PRIVATE_KEY_STRING, help=help_strings.PRIVATE_KEY\n    ),\n    private_key_file: Optional[Path] = typer.Option(\n        sdk_settings.PRIVATE_KEY_FILE, help=help_strings.PRIVATE_KEY_FILE\n    ),\n    debug: bool = False,\n):\n    \"\"\"Forget an existing aleph.im message.\"\"\"\n\n    setup_logging(debug)\n\n    hash_list: List[str] = hashes.split(\",\")\n\n    account: AccountFromPrivateKey = _load_account(private_key, private_key_file)\n    with AuthenticatedAlephClient(\n        account=account, api_server=sdk_settings.API_HOST\n    ) as client:\n        client.forget(hashes=hash_list, reason=reason, channel=channel)\n\n\n@app.command()\ndef watch(\n    ref: str = typer.Argument(..., help=\"Hash reference of the message to watch\"),\n    indent: Optional[int] = typer.Option(None, help=\"Number of indents to use\"),\n    debug: bool = False,\n):\n    \"\"\"Watch a hash for amends and print amend hashes\"\"\"\n\n    setup_logging(debug)\n\n    with AlephClient(api_server=sdk_settings.API_HOST) as client:\n        original: AlephMessage = client.get_message(item_hash=ref)\n        for message in client.watch_messages(\n            refs=[ref], addresses=[original.content.address]\n        ):\n            typer.echo(f\"{message.json(indent=indent)}\")\n" }, { "alpha_fraction": 0.6667920351028442, "alphanum_fraction": 0.6686724424362183, "avg_line_length": 30.282352447509766, "blob_id": "31fbee94539f9d2438d12975392e1bf2f894accd", "content_id": "b172faba60c44dd9ef0c7d62fe818e1ab6c96f2e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2659, "license_type": "permissive", "max_line_length": 105, "num_lines": 85, "path": "/tests/integration/itest_aggregates.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import json\nfrom typing import Dict\n\nimport pytest\nfrom aleph.sdk import AuthenticatedAlephClient\nfrom aleph.sdk.types import Account\n\nfrom tests.integration.toolkit import try_until\n\nfrom .config import REFERENCE_NODE, TARGET_NODE\n\n\nasync def create_aggregate_on_target(\n    account: Account,\n    key: str,\n    content: Dict,\n    emitter_node: str,\n    receiver_node: str,\n    channel=\"INTEGRATION_TESTS\",\n):\n    async with AuthenticatedAlephClient(\n        account=account, api_server=emitter_node\n    ) as emitter_client:\n        aggregate_message, message_status = await emitter_client.create_aggregate(\n            key=key,\n            content=content,\n            channel=\"INTEGRATION_TESTS\",\n        )\n\n    assert aggregate_message.sender == account.get_address()\n    assert aggregate_message.channel == channel\n    # Note: lots of duplicates in the response\n    item_content = json.loads(aggregate_message.item_content)\n    assert item_content[\"key\"] == key\n    assert item_content[\"content\"] == content\n    assert item_content[\"address\"] == account.get_address()\n    assert aggregate_message.content.key == key\n    assert aggregate_message.content.address == account.get_address()\n    assert aggregate_message.content.content == content\n\n    async with AuthenticatedAlephClient(\n        account=account, api_server=receiver_node\n    ) as receiver_client:\n        aggregate_from_receiver = await try_until(\n            
receiver_client.fetch_aggregate,\n            lambda aggregate: aggregate is not None,\n            timeout=5,\n            address=account.get_address(),\n            key=key,\n            api_server=receiver_node,\n        )\n\n    for key, value in content.items():\n        assert key in aggregate_from_receiver\n        assert aggregate_from_receiver[key] == value\n\n\n@pytest.mark.asyncio\nasync def test_create_aggregate_on_target(fixture_account):\n    \"\"\"\n    Attempts to create an aggregate on the target node and validates that the aggregate can be fetched\n    from the reference node.\n    \"\"\"\n    await create_aggregate_on_target(\n        fixture_account,\n        key=\"test_target\",\n        content={\"a\": 1, \"b\": 2},\n        emitter_node=TARGET_NODE,\n        receiver_node=REFERENCE_NODE,\n    )\n\n\n@pytest.mark.asyncio\nasync def test_create_aggregate_on_reference(fixture_account):\n    \"\"\"\n    Attempts to create an aggregate on the reference node and validates that the aggregate can be fetched\n    from the target node.\n    \"\"\"\n    await create_aggregate_on_target(\n        fixture_account,\n        key=\"test_reference\",\n        content={\"c\": 3, \"d\": 4},\n        emitter_node=REFERENCE_NODE,\n        receiver_node=TARGET_NODE,\n    )\n" }, { "alpha_fraction": 0.5608006119728088, "alphanum_fraction": 0.5608006119728088, "avg_line_length": 29.43678092956543, "blob_id": "467bcb79991e772dd9ef0c7d62fe818e1ab6c96f2e", "content_id": "643fdcedb40de1b4f8193b22778f020c695b5bfc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2648, "license_type": "permissive", "max_line_length": 84, "num_lines": 87, "path": "/src/aleph_client/vm/app.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "from dataclasses import dataclass\nfrom typing import (\n    Any,\n    Awaitable,\n    Callable,\n    Dict,\n    List,\n    Mapping,\n    MutableMapping,\n    Optional,\n)\n\nAsgiApplication = Callable\n\n\n@dataclass\nclass EventHandler:\n    filters: List[Dict]\n    handler: Callable\n\n    def matches(self, scope: Mapping[str, Any]) -> bool:\n        for filter in self.filters:\n            # if [filter matches scope]: TODO\n            if True:\n                return True\n        return False\n\n\nclass AlephApp:\n    \"\"\"ASGI compatible wrapper for apps running inside aleph.im Virtual Machines.\n    The wrapper adds support to register functions to react to non-HTTP events.\n    \"\"\"\n\n    http_app: Optional[AsgiApplication] = None\n    event_handlers: List[EventHandler]\n\n    def __init__(self, http_app: Optional[AsgiApplication] = None):\n        self.http_app = http_app\n        self.event_handlers = []\n\n    def event(self, filters: List[Dict]):\n        \"\"\"Use this decorator to register event calls.\n\n        ```python\n        @app.event(filters=[...])\n        def on_event(event):\n            ...\n        ```\n        \"\"\"\n\n        def inner(func: Callable):\n            # Register the event handler\n            event_handler = EventHandler(filters=filters, handler=func)\n            self.event_handlers.append(event_handler)\n            return func\n\n        return inner\n\n    async def __call__(\n        self,\n        scope: MutableMapping[str, Any],\n        receive: Optional[Callable[[], Awaitable[Any]]] = None,\n        send: Optional[Callable[[Dict[Any, Any]], Awaitable[Any]]] = None,\n    ):\n        if scope[\"type\"] in (\"http\", \"websocket\", \"lifespan\"):\n            if self.http_app:\n                await self.http_app(scope=scope, receive=receive, send=send)\n            else:\n                raise ValueError(\"No HTTP app registered\")\n        elif scope[\"type\"] == \"aleph.message\":\n            for event_handler in self.event_handlers:\n                if event_handler.matches(scope):\n                    # event_handler.handler(scope=scope, receive=receive, send=send)\n                    async def send_handler_result():\n                        result = await event_handler.handler(event=scope)\n                        if send:\n                            await send(result)\n                        
else:\n                            raise ValueError(\"No send method specified\")\n\n                    return send_handler_result()\n        else:\n            raise ValueError(f\"Unknown scope type '{scope['type']}'\")\n\n    def __getattr__(self, name):\n        # Default all calls to the HTTP handler\n        return getattr(self.http_app, name)\n" }, { "alpha_fraction": 0.5897436141967773, "alphanum_fraction": 0.7094017267227173, "avg_line_length": 38, "blob_id": "a18855cd53e009ca26e4dc5dfdb1318945a42704", "content_id": "4ec95a278c2092bc131c69aa300809e105c3e6bf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "permissive", "max_line_length": 40, "num_lines": 3, "path": "/tests/integration/config.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "TARGET_NODE = \"http://163.172.70.92:4024\"\nREFERENCE_NODE = \"https://api2.aleph.im\"\nTEST_CHANNEL = \"INTEGRATION_TESTS\"\n" }, { "alpha_fraction": 0.5848502516746521, "alphanum_fraction": 0.5889606475830078, "avg_line_length": 28.877193450927734, "blob_id": "bbfd4bf41033ae3c815e61a1877877d1bdbebf64", "content_id": "98abb7da6753007843838679d5277b6d2550b0ac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3406, "license_type": "permissive", "max_line_length": 130, "num_lines": 114, "path": "/examples/store.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import asyncio\n\nimport click\nfrom aleph.sdk.chains.common import get_fallback_private_key\nfrom aleph.sdk.chains.ethereum import ETHAccount\nfrom aleph.sdk.client import AuthenticatedAlephClient\nfrom aleph_message.models import StoreMessage\nfrom aleph_message.status import MessageStatus\n\nDEFAULT_SERVER = \"https://api2.aleph.im\"\n\n\nasync def print_output_hash(message: StoreMessage, status: MessageStatus):\n    print(\"Successfully created STORE message\")\n    print(f\"File hash ({message.content.item_type}): {message.content.item_hash}\")\n    print(\"Sender: \", message.sender)\n    print(f\"Message hash: {message.item_hash}\")\n    print(\n        f\"Explorer URL: https://explorer.aleph.im/address/{message.chain.value}/{message.sender}/message/{message.item_hash}\"\n    )\n\n\nasync def do_upload(account, engine, channel, filename=None, file_hash=None):\n    async with AuthenticatedAlephClient(\n        account=account, api_server=DEFAULT_SERVER\n    ) as client:\n        print(filename, account.get_address())\n        if filename:\n            try:\n                with open(filename, \"rb\") as f:\n                    # Do something with the file\n                    content = f.read()\n                    if len(content) > 4 * 1024 * 1024 and engine == \"STORAGE\":\n                        print(\"File too big for native STORAGE engine\")\n                        return\n                    message, status = await client.create_store(\n                        account,\n                        file_content=content,\n                        channel=channel,\n                        storage_engine=engine.lower(),\n                    )\n            except IOError:\n                print(\"File not accessible\")\n                raise\n\n        elif file_hash:\n            message, status = await client.create_store(\n                account,\n                file_hash=file_hash,\n                channel=channel,\n                storage_engine=engine.lower(),\n            )\n\n        await print_output_hash(message, status)\n\n\n@click.command()\n@click.argument(\n    \"filename\",\n)\n@click.option(\n    \"--pkey\",\n    envvar=\"PKEY\",\n    default=None,\n    help=\"Account private key (optional, will default to device.key file)\",\n)\n@click.option(\n    \"--storage-engine\",\n    default=\"IPFS\",\n    help=\"Storage engine to use (default: IPFS)\",\n    type=click.Choice([\"STORAGE\", \"IPFS\"], case_sensitive=False),\n)\n@click.option(\n    \"--channel\",\n    envvar=\"ALEPH_CHANNEL\",\n    
default=\"TEST\",\n help=\"Channel to write in (default: TEST)\",\n)\ndef main(filename, pkey=None, storage_engine=\"IPFS\", channel=\"TEST\"):\n \"\"\"Uploads or store FILENAME.\n\n If FILENAME is an IPFS multihash and IPFS is selected as an engine (default), don't try to upload, just pin it to the network.\n Else, uploads the file to the network before pining it.\n \"\"\"\n if pkey is None:\n pkey = get_fallback_private_key()\n\n account = ETHAccount(private_key=pkey)\n\n upload_filename = None\n upload_hash = None\n\n if (\n 46 <= len(filename) <= 48\n and filename.startswith(\"Q\")\n and storage_engine == \"IPFS\"\n ):\n upload_hash = filename\n else:\n upload_filename = filename\n\n asyncio.run(\n do_upload(\n account,\n storage_engine,\n channel,\n filename=upload_filename,\n file_hash=upload_hash,\n )\n )\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5675883293151855, "alphanum_fraction": 0.5729646682739258, "avg_line_length": 27.30434799194336, "blob_id": "1754fc5c0adc86f31c97549c68e663c771af466a", "content_id": "d95d36f9a3c619a5d9e6fb97565f2a44b0af127f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2604, "license_type": "permissive", "max_line_length": 82, "num_lines": 92, "path": "/docs/content/introduction.rst", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "Introduction to Aleph.im\n========================\n\nThe Aleph.im network can be accessed from any API server.\nTo run one yourself, you will need to install\n`PyAleph <https://github.com/aleph-im/PyAleph>`_.\n\n\nData retrieval\n--------------\n\nData retrieval is simple, using ReST APIs on any API server.\nThere is a few helpers available in this library (depending on the requested\ndata type).\n\n\nData structures\n---------------\n\nAll data transfered over the aleph.im network are aleph messages.\n\n.. uml::\n\n @startuml\n entity Message {\n .. Message info ..\n *type : text\n one of: POST, AGGREGATE, STORE\n *channel : text\n (channel of the message, one application ideally has one channel)\n *time : timestamp\n .. Sender info ..\n *sender : text <<address>>\n *chain : text\n (chain of sender: NULS, NULS2, ETH, BNB...)\n -- Content --\n *item_hash <<hash>>\n if IPFS: multihash of json serialization of content\n if internal storage: hash of the content (sha256 only for now)\n if inline: hash of item_content using hash_type (sha256 only for now)\n *item_content : text <<json>>\n mandatory if of inline type, json serialization of the message\n #item_type : text (optional)\n one of: 'ipfs', 'inline', 'storage'.\n default: 'ipfs' if no item_content and hash length 56,\n 'storage' if length 64, 'inline' if there is an item_content.\n #hash_type : text (optional)\n default: sha256 (only supported value for now)\n }\n\n hide circle\n @enduml\n\nActual content sent by regular users can currently be of two types:\n\n- AGGREGATE: a key-value storage specific to an address\n- POST: unique data posts (unique data points, events\n\n.. 
uml:: \n \n    @startuml\n    object Message {\n    ...\n    }\n\n    object Aggregate <<message content>> {\n        key : text\n        address : text <<address>>\n        ~ content : object\n        time : timestamp\n    }\n\n    object Post <<message content>> {\n        type : text\n        address : text <<address>>\n        ~ content : object\n        time : timestamp\n    }\n\n    object Store <<message content>> {\n        address : text <<address>>\n        item_type : same than Message.item_type\n        (note: does not support inline)\n        item_hash : same than Message.item_hash\n        time : timestamp\n    }\n\n\n    Message ||--o| Aggregate\n    Message ||--o| Post\n    Message ||--o| Store\n    @enduml\n" }, { "alpha_fraction": 0.7439446449279785, "alphanum_fraction": 0.7439446449279785, "avg_line_length": 27.899999618530273, "blob_id": "ae472d0d26f5c0dafa77cc7dd54271e252b1e90d", "content_id": "8cbcd8bfdeaca0441ad502ce5261e92fa4fb9e43", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 578, "license_type": "permissive", "max_line_length": 89, "num_lines": 20, "path": "/tests/integration/conftest.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import asyncio\n\nimport pytest\nfrom aleph.sdk.chains.common import get_fallback_private_key\nfrom aleph.sdk.chains.ethereum import ETHAccount\n\n\n@pytest.fixture\ndef fixture_account():\n    private_key = get_fallback_private_key()\n    return ETHAccount(private_key)\n\n\n# Fixes the \"Event loop is closed\" error that happens when running several tests in a row\n@pytest.fixture(scope=\"session\")\ndef event_loop(request):\n    \"\"\"Create an instance of the default event loop for each test case.\"\"\"\n    loop = asyncio.get_event_loop_policy().new_event_loop()\n    yield loop\n    loop.close()\n" }, { "alpha_fraction": 0.6123807430267334, "alphanum_fraction": 0.6130463480949402, "avg_line_length": 31.897809982299805, "blob_id": "0d9bae93f354dec35902a94b9de60d4b6f3de04a", "content_id": "ecbdec0263607639ead5701cefcb702dcfeb5be4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4507, "license_type": "permissive", "max_line_length": 92, "num_lines": 137, "path": "/src/aleph_client/vm/cache.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import abc\nimport fnmatch\nimport re\nimport threading\nfrom functools import lru_cache\nfrom typing import Any, Dict, List, NewType, Optional, Union\n\nimport aiohttp\nfrom aiohttp import ClientSession\n\nfrom ..conf import settings\n\nCacheKey = NewType(\"CacheKey\", str)\n\n\n@lru_cache()\ndef _get_fallback_session(thread_id: Optional[int]) -> ClientSession:\n    if settings.API_UNIX_SOCKET:\n        connector = aiohttp.UnixConnector(path=settings.API_UNIX_SOCKET)\n        return aiohttp.ClientSession(connector=connector)\n    else:\n        return aiohttp.ClientSession()\n\n\ndef get_fallback_session() -> ClientSession:\n    thread_id = threading.get_native_id()\n    return _get_fallback_session(thread_id=thread_id)\n\n\ndef sanitize_cache_key(key: str) -> CacheKey:\n    if not re.match(r\"^\\w+$\", key):\n        raise ValueError(\"Key may only contain letters, numbers and underscore\")\n    return CacheKey(key)\n\n\nclass BaseVmCache(abc.ABC):\n    \"\"\"Virtual Machines can use this cache to store temporary data in memory on the host.\"\"\"\n\n    @abc.abstractmethod\n    async def get(self, key: str) -> Optional[bytes]:\n        \"\"\"Get the value for a given key string.\"\"\"\n        pass\n\n    @abc.abstractmethod\n    async def set(self, key: str, value: Union[str, bytes]) -> Any:\n        \"\"\"Set the value for a given key 
string.\"\"\"\n        pass\n\n    @abc.abstractmethod\n    async def delete(self, key: str) -> Any:\n        \"\"\"Delete the value for a given key string.\"\"\"\n        pass\n\n    @abc.abstractmethod\n    async def keys(self, pattern: str = \"*\") -> List[str]:\n        \"\"\"Get all keys matching a given glob pattern.\"\"\"\n        pass\n\n\nclass VmCache(BaseVmCache):\n    \"\"\"Virtual Machines can use this cache to store temporary data in memory on the host.\"\"\"\n\n    session: ClientSession\n    cache: Dict[str, bytes]\n    api_host: str\n\n    def __init__(\n        self, session: Optional[ClientSession] = None, api_host: Optional[str] = None\n    ):\n        self.session = session or get_fallback_session()\n        self.cache = {}\n        self.api_host = api_host if api_host else settings.API_HOST\n\n    async def get(self, key: str) -> Optional[bytes]:\n        sanitized_key = sanitize_cache_key(key)\n        async with self.session.get(f\"{self.api_host}/cache/{sanitized_key}\") as resp:\n            if resp.status == 404:\n                return None\n\n            resp.raise_for_status()\n            return await resp.read()\n\n    async def set(self, key: str, value: Union[str, bytes]) -> Any:\n        sanitized_key = sanitize_cache_key(key)\n        data = value if isinstance(value, bytes) else value.encode()\n        async with self.session.put(\n            f\"{self.api_host}/cache/{sanitized_key}\", data=data\n        ) as resp:\n            resp.raise_for_status()\n            return await resp.json()\n\n    async def delete(self, key: str) -> Any:\n        sanitized_key = sanitize_cache_key(key)\n        async with self.session.delete(\n            f\"{self.api_host}/cache/{sanitized_key}\"\n        ) as resp:\n            resp.raise_for_status()\n            return await resp.json()\n\n    async def keys(self, pattern: str = \"*\") -> List[str]:\n        if not re.match(r\"^[\\w?*^\\-]+$\", pattern):\n            raise ValueError(\n                \"Pattern may only contain letters, numbers, underscore, ?, *, ^, -\"\n            )\n        async with self.session.get(\n            f\"{self.api_host}/cache/?pattern={pattern}\"\n        ) as resp:\n            resp.raise_for_status()\n            return await resp.json()\n\n\nclass TestVmCache(BaseVmCache):\n    \"\"\"This is a local, dict-based cache that can be used for testing purposes.\"\"\"\n\n    def __init__(self):\n        self._cache: Dict[str, bytes] = {}\n\n    async def get(self, key: str) -> Optional[bytes]:\n        sanitized_key = sanitize_cache_key(key)\n        return self._cache.get(sanitized_key)\n\n    async def set(self, key: str, value: Union[str, bytes]) -> None:\n        sanitized_key = sanitize_cache_key(key)\n        data = value if isinstance(value, bytes) else value.encode()\n        self._cache[sanitized_key] = data\n\n    async def delete(self, key: str) -> None:\n        sanitized_key = sanitize_cache_key(key)\n        del self._cache[sanitized_key]\n\n    async def keys(self, pattern: str = \"*\") -> List[str]:\n        if not re.match(r\"^[\\w?*^\\-]+$\", pattern):\n            raise ValueError(\n                \"Pattern may only contain letters, numbers, underscore, ?, *, ^, -\"\n            )\n        all_keys = list(self._cache.keys())\n        return fnmatch.filter(all_keys, pattern)\n" }, { "alpha_fraction": 0.6768707633018494, "alphanum_fraction": 0.6768707633018494, "avg_line_length": 16.294116973876953, "blob_id": "6d5b6d6f49c61d0443833f65ee01398882529d35", "content_id": "0ec71e67873f413f32b7c86d3c231a68987f6911", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 294, "license_type": "permissive", "max_line_length": 40, "num_lines": 17, "path": "/tests/unit/test_app/main.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "from fastapi import FastAPI\n\nfrom aleph_client.vm.app import AlephApp\n\n# Create a test app\nhttp_app = FastAPI()\napp = AlephApp(http_app=http_app)\n\n\n@app.get(\"/\")
\nasync def index():\n    return {\"index\": \"/\"}\n\n\n@app.event(filters=[])\nasync def aleph_event(event):\n    print(\"aleph_event\", event)\n" }, { "alpha_fraction": 0.6695464253425598, "alphanum_fraction": 0.730021595954895, "avg_line_length": 27.438596725463867, "blob_id": "7e7705e4e522a219994d19efcee3a6589aadf8c3", "content_id": "f654062123e8cbd30100dd24f3ff61bcaa80efd6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3241, "license_type": "permissive", "max_line_length": 127, "num_lines": 114, "path": "/docs/content/account.rst", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "Accounts\n========\n\nTo send data to the aleph.im network, you need to have an account.\nThis account can be made using any of the supported providers.\n\nCommon\n------\n\nYou will need to instanciate an account using a private key accepted by the\ncorresponding account provider.\n\nIf you don't want to handle the private key yourself, you can use the\n\"fallback\" helper. This searches for a \"device.key\" file in the current folder.\nIf this file isn't found, it will try to create a new key file with a random\nkey.\n\nEthereum\n********\n\nExample using Ethereum:\n\n.. code-block:: python3\n\n    from aleph_client.chains.ethereum import get_fallback_account\n\n    account = get_fallback_account()\n\nAnother example setting the private key manually:\n\n.. code-block:: python3\n\n    from aleph_client.chains.ethereum import ETHAccount\n\n    prv = bytes.fromhex(\"xxxxxx\")\n\n    account = ETHAccount(prv)\n\nDepending on account provider, the key can be passed as an hex string.\nIt's the case for Ethereum:\n\n.. code-block:: python3\n\n    >>> from aleph_client.chains.ethereum import ETHAccount\n    >>> account = ETHAccount(\"0x0000000000000000000000000000000000000000000000000000000000000001\")\n    >>> account.get_address()\n    '0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf'\n\n.. WARNING::\n    Do not use this dummy private key, it's just an example!\n\nPolkadot / Substrate\n********************\n\nDOT/Substrate accounts are a bit different. You pass them mnemonics, and optionally an address_type.\n\nExample using Substrate (if you already used a fallback on ethereum or nuls, you might consider deleting the private key file):\n\n.. code-block:: python3\n\n    from aleph_client.chains.substrate import get_fallback_account\n\n    account = get_fallback_account()\n\nAnother example setting the mnemonics manually:\n\n.. code-block:: python3\n\n    from aleph_client.chains.substrate import DOTAccount\n\n    account = DOTAccount(\"payment shy team bargain chest fold bless artwork identify breeze pelican category\")\n\n.. WARNING::\n    Do not use this dummy private key, it's just an example!\n\nYou can also change the address_type (0 for polkadot, 2 for canary, 42 generic...).\n\n.. code-block:: python3\n\n    >>> from aleph_client.chains.substrate import DOTAccount\n    >>> account = DOTAccount(\"payment shy team bargain chest fold bless artwork identify breeze pelican category\")\n    >>> account.get_address()\n    '5CGNMKCscqN2QNcT7Jtuz23ab7JUxh8wTEtXhECZLJn5vCGX'\n    >>> account = DOTAccount(\"payment shy team bargain chest fold bless artwork identify breeze pelican category\",\n    ... address_type=0)\n    >>> account.get_address()\n    '1CfVeTwUcdVqucy4wwv8AsjSjJ8ezh5Xjd1rXButPoc6WJY'\n\n.. WARNING::\n    Do not use this dummy private key, it's just an example!\n\nNULS\n****\n\nThe NULS provider is very similar.\n\nFallback account:\n\n.. 
code-block:: python3\n\n from aleph_client.chains.nuls2 import get_fallback_account\n\n account = get_fallback_account()\n\nFrom a private key:\n\n.. code-block:: python3\n\n >>> from aleph_client.chains.nuls2 import NULSAccount\n >>> account = NULSAccount(\n ... bytes.fromhex(\n ... \"0000000000000000000000000000000000000000000000000000000000000001\"))\n >>> account.get_address()\n 'NULSd6Hgb53vAd7ZMoA2E17DUTT4C1nGrJVpn'" }, { "alpha_fraction": 0.5859912633895874, "alphanum_fraction": 0.5897436141967773, "avg_line_length": 19.766233444213867, "blob_id": "ecf6de187d84ada4ccd8a2faf8e85f766e728d97", "content_id": "cc97491d648c6775d840b650d1b5852572d58e17", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1599, "license_type": "permissive", "max_line_length": 77, "num_lines": 77, "path": "/examples/metrics.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "\"\"\" Server metrics upload.\n\"\"\"\n# -*- coding: utf-8 -*-\n\nimport os\nimport platform\nimport time\n\nimport psutil\nfrom aleph.sdk import AuthenticatedAlephClient\nfrom aleph.sdk.account import _load_account\n\n\ndef get_sysinfo():\n uptime = int(time.time() - psutil.boot_time())\n sysinfo = {\n \"uptime\": uptime,\n \"os\": platform.platform(),\n \"load_avg\": os.getloadavg(),\n \"num_cpus\": psutil.cpu_count(),\n }\n\n return sysinfo\n\n\ndef get_memory():\n return psutil.virtual_memory()._asdict()\n\n\ndef get_swap_space():\n sm = psutil.swap_memory()\n swap = {\n \"total\": sm.total,\n \"free\": sm.free,\n \"used\": sm.used,\n \"percent\": sm.percent,\n \"swapped_in\": sm.sin,\n \"swapped_out\": sm.sout,\n }\n return swap\n\n\ndef get_cpu():\n return psutil.cpu_times_percent(0)._asdict()\n\n\ndef get_cpu_cores():\n return [c._asdict() for c in psutil.cpu_times_percent(0, percpu=True)]\n\n\ndef send_metrics(account, metrics):\n with AuthenticatedAlephClient(\n account=account, api_server=\"https://api2.aleph.im\"\n ) as client:\n return client.create_aggregate(\"metrics\", metrics, channel=\"SYSINFO\")\n\n\ndef collect_metrics():\n return {\n \"memory\": get_memory(),\n \"swap\": get_swap_space(),\n \"cpu\": get_cpu(),\n \"cpu_cores\": get_cpu_cores(),\n }\n\n\ndef main():\n account = _load_account()\n while True:\n metrics = collect_metrics()\n message, status = send_metrics(account, metrics)\n print(\"sent\", message.item_hash)\n time.sleep(10)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7303370833396912, "alphanum_fraction": 0.7303370833396912, "avg_line_length": 25.700000762939453, "blob_id": "18c50d8dffcee5227ac7c59e4895b4ac85a5b23d", "content_id": "e695ef5356d9d003f5ba4536c0e760ebc5b5ac6f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 267, "license_type": "permissive", "max_line_length": 94, "num_lines": 10, "path": "/src/aleph_client/main.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "\"\"\"This module only exists for backward compatibility and will be removed in a future release.\n\"\"\"\n\nimport warnings\n\nwarnings.warn(\n \"`aleph_client.main` is deprecated and will be removed. 
\"\n \"Use `aleph_client.synchronous` instead.\",\n DeprecationWarning,\n)\n" }, { "alpha_fraction": 0.6797176003456116, "alphanum_fraction": 0.6976893544197083, "avg_line_length": 23.73015785217285, "blob_id": "28f1b662a47bf467d3eb8d22befb20091c68a1f9", "content_id": "a1b09133d1fa47d166abda6c3c662eb825c32aaa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1558, "license_type": "permissive", "max_line_length": 156, "num_lines": 63, "path": "/README.rst", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "============\naleph-client\n============\n\nPython Client for the aleph.im network, next generation network of decentralized big data applications.\nDevelopement follows the `Aleph Whitepaper <https://github.com/aleph-im/aleph-whitepaper>`_.\n\nDocumentation\n=============\n\nDocumentation (albeit still vastly incomplete as it is a work in progress) can be found at http://aleph-client.readthedocs.io/ or built from this repo with:\n\n $ python setup.py docs\n\n\nRequirements\n============\n\n- Linux : \n\nSome cryptographic functionalities use curve secp256k1 and require installing\n`libsecp256k1 <https://github.com/bitcoin-core/secp256k1>`_.\n\n $ apt-get install -y python3-pip libsecp256k1-dev\n\n- macOs : \n\n $ brew tap cuber/homebrew-libsecp256k1\n $ brew install libsecp256k1\n\n\nInstallation\n============\n\nUsing pip and `PyPI <https://pypi.org/project/aleph-client/>`_:\n\n $ pip install aleph-client\n\n\nInstallation for development\n============================\n\nIf you want NULS2 support you will need to install nuls2-python (currently only available on github):\n\n $ pip install git+https://github.com/aleph-im/nuls2-python.git\n\n\nTo install from source and still be able to modify the source code:\n\n $ pip install -e .\n or\n $ python setup.py develop\n\n\n\nUsing Docker\n============\n\nUse the Aleph client and it's CLI from within Docker or Podman with:\n\n\t\t$ docker run --rm -ti -v $(pwd)/data:/data ghcr.io/aleph-im/aleph-client/aleph-client:master --help\n\nWarning: This will use an ephemeral key that will be discarded when stopping the container.\n" }, { "alpha_fraction": 0.6820809245109558, "alphanum_fraction": 0.6820809245109558, "avg_line_length": 22.066667556762695, "blob_id": "301270b85e87dc31db38c3af10f17dd3766b9f0a", "content_id": "258e60cbdd0d112865aea27c08fadf79fe262961", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 346, "license_type": "permissive", "max_line_length": 67, "num_lines": 15, "path": "/docker/with-ipfs.entrypoint.sh", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -euo pipefail\n\n# Initialize IPFS if it has not been done yet\nif [ ! 
{ "alpha_fraction": 0.6886503100395203, "alphanum_fraction": 0.6886503100395203, "avg_line_length": 23.066667556762695, "blob_id": "301270b85e87dc31db38c3af10f17dd3766b9f0a", "content_id": "258e60cbdd0d112865aea27c08fadf79fe262961", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 346, "license_type": "permissive", "max_line_length": 67, "num_lines": 15, "path": "/docker/with-ipfs.entrypoint.sh", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nset -euo pipefail\n\n# Initialize IPFS if it has not been done yet\nif [ ! -f /var/lib/ipfs/config ]; then\n    chown -R aleph:aleph /var/lib/ipfs\n    su aleph -c \"/opt/go-ipfs/ipfs init --profile server\"\nfi\n\n# Start IPFS as a daemon\nsu aleph -c \"/opt/go-ipfs/ipfs daemon --enable-pubsub-experiment\" &\n\n# Run a shell\nsu aleph -c \"/bin/bash\"\n" }, { "alpha_fraction": 0.6360369324684143, "alphanum_fraction": 0.6375769972801208, "avg_line_length": 26.05555534362793, "blob_id": "dc03f150deb1ea946423af45ba5b200adbeddfeb", "content_id": "2ce0e86fcce7e74bb9d19fda60f8e5f1b1f17f29", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1948, "license_type": "permissive", "max_line_length": 83, "num_lines": 72, "path": "/examples/httpgateway.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "\"\"\" HTTP gateway example: accept form posts over HTTP and forward them to the\naleph.im network as POST messages.\n\"\"\"\n\nimport click\nfrom aiohttp import web\nfrom aleph.sdk import AuthenticatedAlephClient\nfrom aleph.sdk.chains.common import get_fallback_private_key\nfrom aleph.sdk.chains.ethereum import ETHAccount\n\napp = web.Application()\nroutes = web.RouteTableDef()\n\n\n@routes.get(\"/\")\nasync def hello(request):\n    return web.Response(text=\"Hello, world\")\n\n\n@routes.post(\"/p/{source}\")\nasync def source_post(request):\n    # print(await request.text())\n    data = await request.post()\n    data = dict(data.copy().items())\n\n    secret = data.pop(\"secret\", None)\n    data[\"source\"] = request.match_info[\"source\"]\n\n    if app[\"secret\"] is not None:\n        if secret != app[\"secret\"]:\n            return web.json_response(\n                {\"status\": \"error\", \"message\": \"unauthorized secret\"}\n            )\n\n    async with AuthenticatedAlephClient(\n        account=app[\"account\"], api_server=\"https://api2.aleph.im\"\n    ) as client:\n        message, _status = await client.create_post(\n            data,\n            \"event\",\n            channel=app[\"channel\"],\n        )\n\n    return web.json_response({\"status\": \"success\", \"item_hash\": message.item_hash})\n\n\n@click.command()\n@click.option(\"--host\", default=\"localhost\", help=\"http host\")\n@click.option(\"--port\", default=80, help=\"http port\")\n@click.option(\"--channel\", default=\"GATEWAY\", help=\"Channel for data post\")\n@click.option(\n    \"--pkey\",\n    default=None,\n    help=\"Account private key (optional, will default to device.key file)\",\n)\n@click.option(\"--secret\", default=None, help=\"Secret required to be allowed to post\")\ndef main(host, port, channel, pkey=None, secret=None):\n    app.add_routes(routes)\n\n    app[\"secret\"] = secret\n    app[\"channel\"] = channel\n\n    if pkey is None:\n        pkey = get_fallback_private_key()\n\n    account = ETHAccount(private_key=pkey)\n    app[\"account\"] = account\n\n    web.run_app(app, host=host, port=port)\n\n\nif __name__ == \"__main__\":\n    main()\n" },
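Once the gateway example above is running, it can be exercised with any HTTP client. A hypothetical call, assuming the default host and port and a source named "sensor1" (names and values are placeholders, not part of the example itself):

.. code-block:: python3

    import requests  # third-party HTTP client, assumed installed

    resp = requests.post(
        "http://localhost:80/p/sensor1",
        data={"temperature": "21.5", "secret": "change-me"},
    )
    print(resp.json())  # e.g. {"status": "success", "item_hash": "..."}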
{ "alpha_fraction": 0.6298145651817322, "alphanum_fraction": 0.7011412382125854, "avg_line_length": 27.490196228027344, "blob_id": "055d95bc9d5b4dfa695b0fc1346b8b8dd8159ea6", "content_id": "2437151615587c357c9b33e87eb18f07f1e400c4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1402, "license_type": "permissive", "max_line_length": 148, "num_lines": 51, "path": "/docs/content/posts.rst", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": ".. _posts:\n\n=====\nPosts\n=====\n\nPosts are unique data entries that can be amended later on.\nExamples of use:\n\n- Events\n- Blog posts\n- Comments\n- and many more...\n\nGetting posts\n-------------\n\nTo get posts you have two options: either use the get_posts function and get\nthe posts with their amends applied, or use get_message and only get the unique POST\nmessages (with their content).\n\n\nCreating a Post\n---------------\n\nCreating a post means creating a post object and wrapping it in a message.\nThere is a helper for that: create_post.\n\n.. code-block:: python3\n\n    >>> from aleph_client.synchronous import create_post\n    >>> create_post(account, {'content': 'test'}, post_type='testtype', channel='MY_CHANNEL')\n    {'chain': 'NULS2',\n    'channel': 'MY_CHANNEL',\n    'sender': 'NULSd6HgaaV62iEcTZSWoaTrA3U7Jr7Vv1nXS',\n    'type': 'POST',\n    'time': 1573570575.281997,\n    'item_content': '{\"type\":\"testtype\",\"address\":\"NULSd6HgaaV62iEcTZSWoaTrA3U7Jr7Vv1nXS\",\"content\":{\"content\":\"test\"},\"time\":1573570575.2818618}',\n    'item_hash': '02afdbf33ff2c6ddb46349298a4598a8801cec61dbaa8f3a17ba9d1ad6dd8cb1',\n    'signature': 'G7yJjMCPgvX04Dd9rsz0oEuuRFa4PfuKAMOPA3Oblf6vd5YA1x15jvWLL2WycnnzYLEl0usjTiVxBl530ZOmYgw='}\n\n\nThe asynchronous version is very similar:\n\n.. code-block:: python3\n\n    from aleph_client.asynchronous import create_post\n    await create_post(...)\n\nAmending a Post\n---------------\n" }, { "alpha_fraction": 0.7397260069847107, "alphanum_fraction": 0.7397260069847107, "avg_line_length": 24.33333396911621, "blob_id": "14e74b25bd13a8640e9459ed54c49dbb958b3691", "content_id": "85377d4520a3cc1b9c838031a628c5fbf52915f0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 73, "license_type": "permissive", "max_line_length": 64, "num_lines": 3, "path": "/src/aleph_client/vm/__init__.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "\"\"\"\nAleph helpers for apps running inside aleph.im Virtual Machines.\n\"\"\"\n" }, { "alpha_fraction": 0.6514360308647156, "alphanum_fraction": 0.6553524732589722, "avg_line_length": 29.70652198791504, "blob_id": "100725fc62516ac27fe2aaa0a4e7c968529c16d0", "content_id": "d9aa31a7aee3aa0c16786d495e00308e904b7d3e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1041, "license_type": "permissive", "max_line_length": 65, "num_lines": 33, "path": "/tests/unit/test_vm_cache.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom aleph_client.vm.cache import TestVmCache, sanitize_cache_key\n\n\n@pytest.mark.asyncio\nasync def test_local_vm_cache():\n    cache = TestVmCache()\n    assert (await cache.get(\"doesnotexist\")) is None\n    assert len(await cache.keys()) == 0\n    key = \"thisdoesexist\"\n    value = \"yay, I exist!\"\n    await cache.set(key, value)\n    cached_value = await cache.get(key)\n    assert cached_value is not None\n    assert cached_value.decode() == value\n    assert (await cache.keys())[0] == key\n    assert (await cache.keys(\"*exist\"))[0] == key\n    await cache.delete(key)\n    assert (await cache.get(key)) is None\n    assert len(await cache.keys()) == 0\n\n\ndef test_sanitize_cache_keys():\n    assert sanitize_cache_key(\"abc\")\n    assert sanitize_cache_key(\"abc123\")\n    assert sanitize_cache_key(\"abc_123\")\n    with pytest.raises(ValueError):\n        sanitize_cache_key(\"abc-123\")\n    with pytest.raises(ValueError):\n        sanitize_cache_key(\"abc!123\")\n    with 
pytest.raises(ValueError):\n assert sanitize_cache_key(\"*\")\n" }, { "alpha_fraction": 0.6893470883369446, "alphanum_fraction": 0.7024055123329163, "avg_line_length": 25.454545974731445, "blob_id": "79abfdc1c0c0ed966e937cb5e25fc96c647f0d02", "content_id": "e331730f6bf1895956d45d672961df12e0e5f5e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1455, "license_type": "permissive", "max_line_length": 75, "num_lines": 55, "path": "/tests/unit/test_chain_ethereum.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "from dataclasses import asdict, dataclass\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\n\nimport pytest\nfrom aleph.sdk.chains.ethereum import get_fallback_account\n\n\n@dataclass\nclass Message:\n chain: str\n sender: str\n type: str\n item_hash: str\n\n\ndef test_get_fallback_account():\n with NamedTemporaryFile() as private_key_file:\n account = get_fallback_account(path=Path(private_key_file.name))\n assert account.CHAIN == \"ETH\"\n assert account.CURVE == \"secp256k1\"\n assert account._account.address\n\n\[email protected]\nasync def test_ETHAccount(ethereum_account):\n account = ethereum_account\n\n message = Message(\"ETH\", account.get_address(), \"SomeType\", \"ItemHash\")\n signed = await account.sign_message(asdict(message))\n assert signed[\"signature\"]\n assert len(signed[\"signature\"]) == 132\n\n address = account.get_address()\n assert address\n assert type(address) == str\n assert len(address) == 42\n\n pubkey = account.get_public_key()\n assert type(pubkey) == str\n assert len(pubkey) == 68\n\n\[email protected]\nasync def test_decrypt_secp256k1(ethereum_account):\n account = ethereum_account\n\n assert account.CURVE == \"secp256k1\"\n content = b\"SomeContent\"\n\n encrypted = await account.encrypt(content)\n assert type(encrypted) == bytes\n decrypted = await account.decrypt(encrypted)\n assert type(decrypted) == bytes\n assert content == decrypted\n" }, { "alpha_fraction": 0.6514360308647156, "alphanum_fraction": 0.6553524732589722, "avg_line_length": 21.52941131591797, "blob_id": "9f37f5d8aebb2708379c60c66d202ad3a7815835", "content_id": "eb96d7f27d4fb349a2f0ceea461933ddcc4a3824", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 766, "license_type": "permissive", "max_line_length": 90, "num_lines": 34, "path": "/tests/unit/test_vm_app.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import asyncio\n\nimport pytest\nfrom fastapi.testclient import TestClient\n\nfrom tests.unit.test_app.main import app\n\n# Note: for some reason, the test client must be declared at the same level as the import.\nclient = TestClient(app)\n\n\[email protected]\nasync def test_app_event():\n\n # Call the app with an ASGI context\n scope = {\n \"type\": \"aleph.message\",\n }\n\n async def receive():\n return {\"type\": \"aleph.message\", \"body\": b\"BODY\", \"more_body\": False}\n\n send_queue: asyncio.Queue = asyncio.Queue()\n\n async def send(dico):\n await send_queue.put(dico)\n\n await app(scope, receive, send)\n\n\ndef test_app_http():\n response = client.get(\"/\")\n assert response.status_code == 200\n assert response.json() == {\"index\": \"/\"}\n" }, { "alpha_fraction": 0.6282894611358643, "alphanum_fraction": 0.6315789222717285, "avg_line_length": 22.384614944458008, "blob_id": "6aaef505fa0e8d86dd2e050617624cd013357278", "content_id": 
"2bf6033293891d625571ace52b10701d4bfc7a4a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 608, "license_type": "permissive", "max_line_length": 63, "num_lines": 26, "path": "/tests/integration/toolkit.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import asyncio\nimport time\nfrom typing import Awaitable, Callable, TypeVar\n\nT = TypeVar(\"T\")\n\n\nasync def try_until(\n coroutine: Callable[..., Awaitable[T]],\n condition: Callable[[T], bool],\n timeout: float,\n time_between_attempts: float = 0.5,\n *args,\n **kwargs,\n) -> T:\n\n start_time = time.monotonic()\n\n while time.monotonic() < start_time + timeout:\n result = await coroutine(*args, **kwargs)\n if condition(result):\n return result\n\n await asyncio.sleep(time_between_attempts)\n else:\n raise TimeoutError(f\"No success in {timeout} seconds.\")\n" }, { "alpha_fraction": 0.6509259343147278, "alphanum_fraction": 0.6513888835906982, "avg_line_length": 33.83871078491211, "blob_id": "0e338b46369a4f99805fc6ba2f01d6243e243d8d", "content_id": "ab4781892271fef14fcf4e41e8103f96e8c3ed34", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2160, "license_type": "permissive", "max_line_length": 82, "num_lines": 62, "path": "/src/aleph_client/utils.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import logging\nimport os\nfrom pathlib import Path\nfrom shutil import make_archive\nfrom typing import Tuple, Type\nfrom zipfile import BadZipFile, ZipFile\n\nfrom aleph.sdk.types import GenericMessage\nfrom aleph_message.models import MessageType\nfrom aleph_message.models.program import Encoding\n\nfrom aleph_client.conf import settings\n\nlogger = logging.getLogger(__name__)\n\n\ntry:\n import magic\nexcept ImportError:\n logger.info(\"Could not import library 'magic', MIME type detection disabled\")\n magic = None # type:ignore\n\n\ndef try_open_zip(path: Path) -> None:\n \"\"\"Try opening a zip to check if it is valid\"\"\"\n assert path.is_file()\n with open(path, \"rb\") as archive_file:\n with ZipFile(archive_file, \"r\") as archive:\n if not archive.namelist():\n raise BadZipFile(\"No file in the archive.\")\n\n\ndef create_archive(path: Path) -> Tuple[Path, Encoding]:\n \"\"\"Create a zip archive from a directory\"\"\"\n if os.path.isdir(path):\n if settings.CODE_USES_SQUASHFS:\n logger.debug(\"Creating squashfs archive...\")\n archive_path = Path(f\"{path}.squashfs\")\n os.system(f\"mksquashfs {path} {archive_path} -noappend\")\n assert archive_path.is_file()\n return archive_path, Encoding.squashfs\n else:\n logger.debug(\"Creating zip archive...\")\n make_archive(str(path), \"zip\", path)\n archive_path = Path(f\"{path}.zip\")\n return archive_path, Encoding.zip\n elif os.path.isfile(path):\n if path.suffix == \".squashfs\" or (\n magic and magic.from_file(path).startswith(\"Squashfs filesystem\")\n ):\n return path, Encoding.squashfs\n else:\n try_open_zip(Path(path))\n return path, Encoding.zip\n else:\n raise FileNotFoundError(\"No file or directory to create the archive from\")\n\n\ndef get_message_type_value(message_type: Type[GenericMessage]) -> MessageType:\n \"\"\"Returns the value of the 'type' field of a message type class.\"\"\"\n type_literal = message_type.__annotations__[\"type\"]\n return type_literal.__args__[0] # Get the value from a Literal\n" }, { "alpha_fraction": 0.6344269514083862, "alphanum_fraction": 0.6510438919067383, 
"avg_line_length": 27.277109146118164, "blob_id": "b2119029f07270b6f0546385495b600e057e8c37", "content_id": "70fe9f4786bf06ad88c81237566f787619a4bd78", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2347, "license_type": "permissive", "max_line_length": 78, "num_lines": 83, "path": "/docs/content/aggregates.rst", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": ".. _aggregates:\n\n==========\nAggregates\n==========\n\nAggregates are a key-value store specific to an account.\nEach time a new aggregate message is received for a specific account, the\nnodes add a new layer on top and mutate the global storage for this account.\n\nQuery aggregate of an account\n-----------------------------\n\nTo query keys from an account aggregate, you need to call the fetch\naggregate function.\n\nSynchronous version:\n\n.. code-block:: python3\n\n >>> from aleph_client.synchronous import fetch_aggregate\n >>> fetch_aggregate(\"0x06DE0C46884EbFF46558Cd1a9e7DA6B1c3E9D0a8\",\n ... \"profile\")\n {\"bio\": \"tester\", \"name\": \"Moshe on Ethereum\"}\n\n\nMutate aggregate\n----------------\n\nTo mutate an aggregate you need to call the create_aggregate function (it will\ncreate an AGGREGATE type message for you and submit it).\nYou need a valid account to do so.\n\nasynchronous version (assumes you already have an account instanciated):\n\n.. code-block:: python3\n\n >>> from aleph_client.synchronous import create_aggregate, fetch_aggregate\n >>> create_aggregate(\n ... account, 'testkey', {'a': 1, 'b': 2}, channel='MY_CHANNEL')\n >>> fetch_aggregate(account.get_address(), 'testkey')\n {'a': 1, 'b': 2}\n >>> create_aggregate(\n ... account, 'testkey', {'a': 2, 'c': 4}, channel='MY_CHANNEL')\n >>> fetch_aggregate(account.get_address(), 'testkey')\n {'a': 2, 'b': 2, 'c': 4}\n\nAsynchronous version is very similar:\n\n.. code-block:: python3\n\n from aleph_client.asynchronous import create_aggregate\n await create_aggregate(...)\n\nIf you want to set an aggregate on another address than the one of your\naccount, this address should have something similar to this in its\n\"security\" key:\n\n.. code-block:: python3\n\n >>> fetch_aggregate('TARGET_ADDRESS', 'security')\n {'authorizations': [\n {\n 'address': 'YOUR_ADDRESS',\n 'types': ['AGGREGATE]\n 'aggregate_keys': ['testkey']\n }\n ]}\n\nTo write to this address 'testkey' aggregate key:\n\n.. code-block:: python3\n\n >>> create_aggregate(\n ... account, 'testkey', {'a': 1, 'b': 2}, channel='MY_CHANNEL',\n ... address='TARGET_ADDRESS')\n\n\n.. 
note::\n\n    For more information on the authorizations model, see\n    `this pyaleph doc\n    <https://pyaleph.readthedocs.io/en/latest/protocol/authorizations.html>`_.\n" }, { "alpha_fraction": 0.7103365659713745, "alphanum_fraction": 0.7103365659713745, "avg_line_length": 33.66666793823242, "blob_id": "2e073750912bc9f7b3b99d0036fc6f57a785716f", "content_id": "e5583e974e03102f56debcbe2c65e6d6a2145b6b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1664, "license_type": "permissive", "max_line_length": 83, "num_lines": 48, "path": "/src/aleph_client/commands/aggregate.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "from pathlib import Path\nfrom typing import Optional\n\nimport typer\nfrom aleph.sdk.account import _load_account\nfrom aleph.sdk.client import AuthenticatedAlephClient\nfrom aleph.sdk.conf import settings as sdk_settings\nfrom aleph.sdk.types import AccountFromPrivateKey\nfrom aleph_message.models import MessageType\n\nfrom aleph_client.commands import help_strings\nfrom aleph_client.commands.utils import setup_logging\n\napp = typer.Typer()\n\n\n@app.command()\ndef forget(\n    key: str = typer.Argument(..., help=\"Aggregate key to be removed.\"),\n    reason: Optional[str] = typer.Option(\n        None, help=\"A description of why the messages are being forgotten\"\n    ),\n    channel: Optional[str] = typer.Option(default=None, help=help_strings.CHANNEL),\n    private_key: Optional[str] = typer.Option(\n        sdk_settings.PRIVATE_KEY_STRING, help=help_strings.PRIVATE_KEY\n    ),\n    private_key_file: Optional[Path] = typer.Option(\n        sdk_settings.PRIVATE_KEY_FILE, help=help_strings.PRIVATE_KEY_FILE\n    ),\n    debug: bool = False,\n):\n    \"\"\"Forget all the messages composing an aggregate.\"\"\"\n\n    setup_logging(debug)\n\n    account: AccountFromPrivateKey = _load_account(private_key, private_key_file)\n\n    with AuthenticatedAlephClient(\n        account=account, api_server=sdk_settings.API_HOST\n    ) as client:\n        message_response = client.get_messages(\n            addresses=[account.get_address()],\n            message_type=MessageType.aggregate.value,\n            content_keys=[key],\n        )\n        hash_list = [message[\"item_hash\"] for message in message_response.messages]\n\n        client.forget(hashes=hash_list, reason=reason, channel=channel)\n" }, { "alpha_fraction": 0.5490723252296448, "alphanum_fraction": 0.553912341594696, "avg_line_length": 28.99193572998047, "blob_id": "8e047748aa7ae5738a178e35a8f67ddb543ddd79", "content_id": "0da1734a8ee34a8e0823a7a8f96a83e91bdb0971", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3719, "license_type": "permissive", "max_line_length": 93, "num_lines": 124, "path": "/src/aleph_client/commands/utils.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import logging\nfrom datetime import datetime\nfrom typing import Dict, List, Optional, Union\n\nfrom aleph.sdk.types import GenericMessage\nfrom pygments import highlight\nfrom pygments.formatters.terminal256 import Terminal256Formatter\nfrom pygments.lexers import JsonLexer\nfrom typer import echo\n\n\ndef colorful_json(obj: str):\n    \"\"\"Render a JSON string with colors.\"\"\"\n    return highlight(\n        obj,\n        lexer=JsonLexer(),\n        formatter=Terminal256Formatter(),\n    )\n\n\ndef colorful_message_json(message: GenericMessage):\n    \"\"\"Render a message in JSON with colors.\"\"\"\n    return colorful_json(message.json(sort_keys=True, indent=4))\n\n\ndef input_multiline() -> str:\n    \"\"\"Prompt the 
user for a multiline input.\"\"\"\n echo(\"Enter/Paste your content. Ctrl-D or Ctrl-Z ( windows ) to save it.\")\n contents = \"\"\n while True:\n try:\n line = input()\n except EOFError:\n break\n contents += line + \"\\n\"\n return contents\n\n\ndef setup_logging(debug: bool = False):\n level = logging.DEBUG if debug else logging.WARNING\n logging.basicConfig(level=level)\n\n\ndef yes_no_input(text: str, default: Optional[bool] = None):\n while True:\n if default is True:\n response = input(f\"{text} [Y/n] \")\n elif default is False:\n response = input(f\"{text} [y/N] \")\n else:\n response = input(f\"{text} \")\n\n if response.lower() in (\"y\", \"yes\"):\n return True\n elif response.lower() in (\"n\", \"no\"):\n return False\n elif response == \"\" and default is not None:\n return default\n else:\n if default is None:\n echo(\"Please enter 'y', 'yes', 'n' or 'no'\")\n else:\n echo(\"Please enter 'y', 'yes', 'n', 'no' or nothing\")\n continue\n\n\ndef prompt_for_volumes():\n while yes_no_input(\"Add volume ?\", default=False):\n comment = input(\"Description: \") or None\n mount = input(\"Mount: \")\n persistent = yes_no_input(\"Persist on VM host ?\", default=False)\n if persistent:\n name = input(\"Volume name: \")\n size_mib = int(input(\"Size in MiB: \"))\n yield {\n \"comment\": comment,\n \"mount\": mount,\n \"name\": name,\n \"persistence\": \"host\",\n \"size_mib\": size_mib,\n }\n else:\n ref = input(\"Ref: \")\n use_latest = yes_no_input(\"Use latest version ?\", default=True)\n yield {\n \"comment\": comment,\n \"mount\": mount,\n \"ref\": ref,\n \"use_latest\": use_latest,\n }\n\n\ndef volume_to_dict(volume: List[str]) -> Optional[Dict[str, Union[str, int]]]:\n if not volume:\n return None\n dict_store: Dict[str, Union[str, int]] = {}\n for word in volume:\n split_values = word.split(\",\")\n for param in split_values:\n p = param.split(\"=\")\n if p[1].isdigit():\n dict_store[p[0]] = int(p[1])\n elif p[1] in [\"True\", \"true\", \"False\", \"false\"]:\n dict_store[p[0]] = bool(p[1].capitalize())\n else:\n dict_store[p[0]] = p[1]\n\n return dict_store\n\n\ndef str_to_datetime(date: Optional[str]) -> Optional[datetime]:\n \"\"\"\n Converts a string representation of a date/time to a datetime object.\n\n The function can accept either a timestamp or an ISO format datetime string as the input.\n \"\"\"\n if date is None:\n return None\n try:\n date_f = float(date)\n return datetime.fromtimestamp(date_f)\n except ValueError:\n pass\n return datetime.fromisoformat(date)\n" }, { "alpha_fraction": 0.7345537543296814, "alphanum_fraction": 0.7368420958518982, "avg_line_length": 28.133333206176758, "blob_id": "ff5a86214309829661eee7760d0e1e3a64413e63", "content_id": "c4b18405d57960502e6e8bfc075ebae12d1288a0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 437, "license_type": "permissive", "max_line_length": 121, "num_lines": 15, "path": "/scripts/build-and-test.sh", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nset -euf\n\n# Use Podman if installed, else use Docker\nif hash podman 2> /dev/null\nthen\n DOCKER_COMMAND=podman\nelse\n DOCKER_COMMAND=docker\nfi\n\n$DOCKER_COMMAND build -t aleph-client -f docker/Dockerfile .\n$DOCKER_COMMAND run -ti --rm --entrypoint /opt/venv/bin/pytest aleph-client /opt/aleph-client/ \"$@\"\n$DOCKER_COMMAND run -ti --rm --entrypoint /opt/venv/bin/mypy aleph-client /opt/aleph-client/src/ --ignore-missing-imports\n" }, { "alpha_fraction": 
0.5960482954978943, "alphanum_fraction": 0.657153308391571, "avg_line_length": 28.70652198791504, "blob_id": "18f01180bce31288441dae281851645d8643a9c6", "content_id": "8eb757eb68a916b976e36f5a00f588eb5196f38f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2733, "license_type": "permissive", "max_line_length": 88, "num_lines": 92, "path": "/tests/unit/test_commands.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import subprocess\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\n\nimport pytest\nfrom aleph.sdk.chains.common import generate_key\nfrom typer.testing import CliRunner\n\nfrom aleph_client.__main__ import app\nfrom typing import Generator\n\nrunner = CliRunner()\n\n\[email protected]\ndef empty_account_file() -> Generator[Path, None, None]:\n with NamedTemporaryFile() as key_file:\n yield Path(key_file.name)\n\n\[email protected]\ndef account_file(empty_account_file: Path) -> Path:\n private_key = generate_key()\n empty_account_file.write_bytes(private_key)\n return empty_account_file\n\n\ndef test_account_create(account_file: Path):\n old_key = account_file.read_bytes()\n result = runner.invoke(\n app, [\"account\", \"create\", \"--replace\", \"--private-key-file\", str(account_file)]\n )\n assert result.exit_code == 0, result.stdout\n new_key = account_file.read_bytes()\n assert new_key != old_key\n\n\ndef test_account_address(account_file: Path):\n result = runner.invoke(\n app, [\"account\", \"address\", \"--private-key-file\", str(account_file)]\n )\n assert result.exit_code == 0\n assert result.stdout.startswith(\"0x\")\n assert len(result.stdout.strip()) == 42\n\n\ndef test_account_export_private_key(account_file: Path):\n result = runner.invoke(\n app, [\"account\", \"export-private-key\", \"--private-key-file\", str(account_file)]\n )\n assert result.exit_code == 0\n assert result.stdout.startswith(\"0x\")\n assert len(result.stdout.strip()) == 66\n\n\ndef test_message_get():\n # Use subprocess to avoid border effects between tests caused by the initialisation\n # of the aiohttp client session out of an async context in the SDK. 
This avoids\n # a \"no running event loop\" error when running several tests back to back.\n result = subprocess.run(\n [\n \"aleph\",\n \"message\",\n \"get\",\n \"bd79839bf96e595a06da5ac0b6ba51dea6f7e2591bb913deccded04d831d29f4\",\n ],\n capture_output=True,\n )\n assert result.returncode == 0\n assert b\"0x101d8D16372dBf5f1614adaE95Ee5CCE61998Fc9\" in result.stdout\n\n\ndef test_message_find():\n result = subprocess.run(\n [\n \"aleph\",\n \"message\",\n \"find\",\n \"--pagination=1\",\n \"--page=1\",\n \"--start-date=1234\",\n \"--chains=ETH\",\n \"--hashes=bd79839bf96e595a06da5ac0b6ba51dea6f7e2591bb913deccded04d831d29f4\",\n ],\n capture_output=True,\n )\n assert result.returncode == 0\n assert b\"0x101d8D16372dBf5f1614adaE95Ee5CCE61998Fc9\" in result.stdout\n assert (\n b\"bd79839bf96e595a06da5ac0b6ba51dea6f7e2591bb913deccded04d831d29f4\"\n in result.stdout\n )\n" }, { "alpha_fraction": 0.6382978558540344, "alphanum_fraction": 0.6637259721755981, "avg_line_length": 29.58730125427246, "blob_id": "03808e2a95ef13b4a7aebdfe08fae73dff5e7690", "content_id": "6fcb9ae76e7a3399a8888855037d0be63d042295", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1927, "license_type": "permissive", "max_line_length": 88, "num_lines": 63, "path": "/src/aleph_client/conf.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import os\nfrom pathlib import Path\nfrom shutil import which\nfrom typing import Optional\n\nfrom pydantic import BaseSettings, Field\n\n\nclass Settings(BaseSettings):\n CONFIG_HOME: Optional[str] = None\n\n # In case the user does not want to bother with handling private keys himself,\n # do an ugly and insecure write and read from disk to this file.\n PRIVATE_KEY_FILE: Path = Field(\n default=Path(\"ethereum.key\"),\n description=\"Path to the private key used to sign messages\",\n )\n\n PRIVATE_KEY_STRING: Optional[str] = None\n API_HOST: str = \"https://api2.aleph.im\"\n MAX_INLINE_SIZE: int = 50000\n API_UNIX_SOCKET: Optional[str] = None\n REMOTE_CRYPTO_HOST: Optional[str] = None\n REMOTE_CRYPTO_UNIX_SOCKET: Optional[str] = None\n ADDRESS_TO_USE: Optional[str] = None\n\n DEFAULT_CHANNEL: str = \"TEST\"\n DEFAULT_RUNTIME_ID: str = (\n \"bd79839bf96e595a06da5ac0b6ba51dea6f7e2591bb913deccded04d831d29f4\"\n )\n DEFAULT_VM_MEMORY: int = 128\n DEFAULT_VM_VCPUS: int = 1\n DEFAULT_VM_TIMEOUT: float = 30.0\n\n CODE_USES_SQUASHFS: bool = which(\"mksquashfs\") is not None # True if command exists\n\n VM_URL_PATH = \"https://aleph.sh/vm/{hash}\"\n VM_URL_HOST = \"https://{hash_base32}.aleph.sh\"\n\n class Config:\n env_prefix = \"ALEPH_\"\n case_sensitive = False\n env_file = \".env\"\n\n\n# Settings singleton\nsettings = Settings()\n\nif settings.CONFIG_HOME is None:\n xdg_data_home = os.environ.get(\"XDG_DATA_HOME\")\n if xdg_data_home is not None:\n os.environ[\"ALEPH_CONFIG_HOME\"] = str(Path(xdg_data_home, \".aleph-im\"))\n else:\n home = os.path.expanduser(\"~\")\n os.environ[\"ALEPH_CONFIG_HOME\"] = str(Path(home, \".aleph-im\"))\n\n settings = Settings()\n\nassert settings.CONFIG_HOME\nif str(settings.PRIVATE_KEY_FILE) == \"ethereum.key\":\n settings.PRIVATE_KEY_FILE = Path(\n settings.CONFIG_HOME, \"private-keys\", \"ethereum.key\"\n )\n" }, { "alpha_fraction": 0.6705153584480286, "alphanum_fraction": 0.6736130714416504, "avg_line_length": 29.350427627563477, "blob_id": "829b96e84a1ed43ef07f377e495e6fee517c3c08", "content_id": "52020e02a1ca51305b9277810fbf09c6f117dcd4", "detected_licenses": [ 
"MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3551, "license_type": "permissive", "max_line_length": 85, "num_lines": 117, "path": "/src/aleph_client/commands/account.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import base64\nimport logging\nfrom pathlib import Path\nfrom typing import Optional\n\nimport typer\nfrom aleph.sdk.account import _load_account\nfrom aleph.sdk.chains.common import generate_key\nfrom aleph.sdk.chains.ethereum import ETHAccount\nfrom aleph.sdk.conf import settings as sdk_settings\nfrom aleph.sdk.types import AccountFromPrivateKey\nfrom typer.colors import GREEN, RED\n\n\nfrom aleph_client.commands import help_strings\nfrom aleph_client.commands.utils import setup_logging\n\nlogger = logging.getLogger(__name__)\napp = typer.Typer()\n\n\[email protected]()\ndef create(\n private_key: Optional[str] = typer.Option(None, help=help_strings.PRIVATE_KEY),\n private_key_file: Optional[Path] = typer.Option(\n ..., help=help_strings.PRIVATE_KEY_FILE\n ),\n replace: bool = False,\n debug: bool = False,\n):\n \"\"\"Create or import a private key.\"\"\"\n\n setup_logging(debug)\n\n if private_key_file is None:\n private_key_file = Path(\n typer.prompt(\n \"Enter file in which to save the key\", sdk_settings.PRIVATE_KEY_FILE\n )\n )\n\n if private_key_file.exists() and not replace:\n typer.secho(f\"Error: key already exists: '{private_key_file}'\", fg=RED)\n\n raise typer.Exit(1)\n\n private_key_bytes: bytes\n if private_key is not None:\n # Validate the private key bytes by instantiating an account.\n _load_account(private_key_str=private_key, account_type=ETHAccount)\n private_key_bytes = private_key.encode()\n else:\n private_key_bytes = generate_key()\n\n if not private_key_bytes:\n typer.secho(\"An unexpected error occurred!\", fg=RED)\n raise typer.Exit(2)\n\n private_key_file.parent.mkdir(parents=True, exist_ok=True)\n private_key_file.write_bytes(private_key_bytes)\n typer.secho(f\"Private key stored in {private_key_file}\", fg=RED)\n\n\[email protected]()\ndef address(\n private_key: Optional[str] = typer.Option(\n sdk_settings.PRIVATE_KEY_STRING, help=help_strings.PRIVATE_KEY\n ),\n private_key_file: Optional[Path] = typer.Option(\n sdk_settings.PRIVATE_KEY_FILE, help=help_strings.PRIVATE_KEY_FILE\n ),\n):\n \"\"\"\n Display your public address.\n \"\"\"\n\n if private_key is not None:\n private_key_file = None\n elif private_key_file and not private_key_file.exists():\n typer.secho(\"No private key available\", fg=RED)\n raise typer.Exit(code=1)\n\n account: AccountFromPrivateKey = _load_account(private_key, private_key_file)\n typer.echo(account.get_address())\n\n\[email protected]()\ndef export_private_key(\n private_key: Optional[str] = typer.Option(\n sdk_settings.PRIVATE_KEY_STRING, help=help_strings.PRIVATE_KEY\n ),\n private_key_file: Optional[Path] = typer.Option(\n sdk_settings.PRIVATE_KEY_FILE, help=help_strings.PRIVATE_KEY_FILE\n ),\n):\n \"\"\"\n Display your private key.\n \"\"\"\n\n if private_key is not None:\n private_key_file = None\n elif private_key_file and not private_key_file.exists():\n typer.secho(\"No private key available\", fg=RED)\n raise typer.Exit(code=1)\n\n account: AccountFromPrivateKey = _load_account(private_key, private_key_file)\n if hasattr(account, \"private_key\"):\n private_key_hex: str = base64.b16encode(account.private_key).decode().lower()\n typer.echo(f\"0x{private_key_hex}\")\n else:\n typer.secho(f\"Private key cannot be read for {account}\", 
fg=RED)\n\n\[email protected]()\ndef path():\n if sdk_settings.PRIVATE_KEY_FILE:\n typer.echo(sdk_settings.PRIVATE_KEY_FILE)\n" }, { "alpha_fraction": 0.6384750008583069, "alphanum_fraction": 0.6411042809486389, "avg_line_length": 34.65625, "blob_id": "7b48853458c7e2d3409ecfc9fd329c51ae056340", "content_id": "d98d0c467d4ba30ddfe88274eccc40ac90a81817", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4564, "license_type": "permissive", "max_line_length": 106, "num_lines": 128, "path": "/tests/integration/itest_forget.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "from typing import Callable, Dict\n\nimport pytest\nfrom aleph.sdk import AlephClient, AuthenticatedAlephClient\nfrom aleph.sdk.types import Account\n\nfrom .config import REFERENCE_NODE, TARGET_NODE, TEST_CHANNEL\nfrom .toolkit import try_until\nfrom aleph.sdk import AlephClient\n\n\nasync def create_and_forget_post(\n account: Account, emitter_node: str, receiver_node: str, channel=TEST_CHANNEL\n) -> str:\n async with AuthenticatedAlephClient(\n account=account, api_server=receiver_node\n ) as receiver_client:\n\n async def wait_matching_posts(\n item_hash: str, condition: Callable[[Dict], bool], timeout: int = 5\n ):\n return await try_until(\n receiver_client.get_posts,\n condition,\n timeout=timeout,\n hashes=[item_hash],\n api_server=receiver_node,\n )\n\n async with AuthenticatedAlephClient(\n account=account, api_server=receiver_node\n ) as emitter_client:\n post_message, message_status = await emitter_client.create_post(\n post_content=\"A considerate and politically correct post.\",\n post_type=\"POST\",\n channel=\"INTEGRATION_TESTS\",\n )\n\n # Wait for the message to appear on the receiver. 
We don't check the values,\n # they're checked in other integration tests.\n get_post_response = await wait_matching_posts(\n post_message.item_hash,\n lambda response: len(response[\"posts\"]) > 0,\n )\n print(get_post_response)\n\n post_hash = post_message.item_hash\n reason = \"This well thought-out content offends me!\"\n forget_message, forget_status = await emitter_client.forget(\n hashes=[post_hash],\n reason=reason,\n channel=channel,\n )\n\n assert forget_message.sender == account.get_address()\n assert forget_message.content.reason == reason\n assert forget_message.content.hashes == [post_hash]\n\n print(forget_message)\n\n # Wait until the message is forgotten\n\n forgotten_posts = await wait_matching_posts(\n post_hash,\n lambda response: \"forgotten_by\" in response[\"posts\"][0],\n timeout=15,\n )\n\n assert len(forgotten_posts[\"posts\"]) == 1\n forgotten_post = forgotten_posts[\"posts\"][0]\n assert forgotten_post[\"forgotten_by\"] == [forget_message.item_hash]\n assert forgotten_post[\"item_content\"] is None\n print(forgotten_post)\n\n return post_hash\n\n\[email protected]\nasync def test_create_and_forget_post_on_target(fixture_account):\n \"\"\"\n Create a post on the target node, then forget it and check that the change is propagated\n to the reference node.\n \"\"\"\n _ = await create_and_forget_post(fixture_account, TARGET_NODE, REFERENCE_NODE)\n\n\[email protected]\nasync def test_create_and_forget_post_on_reference(fixture_account):\n \"\"\"\n Create a post on the reference node, then forget it and check that the change is propagated\n to the target node.\n \"\"\"\n _ = await create_and_forget_post(fixture_account, REFERENCE_NODE, TARGET_NODE)\n\n\[email protected]\nasync def test_forget_a_forget_message(fixture_account):\n \"\"\"\n Attempts to forget a forget message. This should fail.\n \"\"\"\n\n # TODO: this test should be moved to the PyAleph API tests, once a framework is in place.\n async with AlephClient(api_server=TARGET_NODE) as client:\n post_hash = await create_and_forget_post(\n fixture_account, TARGET_NODE, TARGET_NODE\n )\n get_post_response = await client.get_posts(hashes=[post_hash])\n assert len(get_post_response[\"posts\"]) == 1\n post = get_post_response[\"posts\"][0]\n\n forget_message_hash = post[\"forgotten_by\"][0]\n async with AuthenticatedAlephClient(account=fixture_account, api_server=TARGET_NODE) as my_client:\n forget_message, forget_status = await my_client.forget(\n hashes=[forget_message_hash],\n reason=\"I want to remember this post. 
Maybe I can forget I forgot it?\",\n channel=TEST_CHANNEL,\n )\n\n print(forget_message)\n\n get_forget_message_response = await client.get_messages(\n hashes=[forget_message_hash], channels=[TEST_CHANNEL]\n )\n assert len(get_forget_message_response.messages) == 1\n forget_message = get_forget_message_response.messages[0]\n print(forget_message)\n\n assert \"forgotten_by\" not in forget_message\n" }, { "alpha_fraction": 0.7225806713104248, "alphanum_fraction": 0.725806474685669, "avg_line_length": 21.14285659790039, "blob_id": "1afc94a4917f250ff71d303c809498206548f86f", "content_id": "ee5032fea73b7bd1b915a6ddf3c600d68609a23f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 310, "license_type": "permissive", "max_line_length": 94, "num_lines": 14, "path": "/scripts/build-and-shell.sh", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nset -euf\n\n# Use Podman if installed, else use Docker\nif hash podman 2> /dev/null\nthen\n DOCKER_COMMAND=podman\nelse\n DOCKER_COMMAND=docker\nfi\n\n$DOCKER_COMMAND build -t aleph-client -f docker/Dockerfile .\n$DOCKER_COMMAND run -ti --rm --entrypoint /bin/bash -v \"$(pwd)\":/opt/aleph-client aleph-client\n" }, { "alpha_fraction": 0.6886503100395203, "alphanum_fraction": 0.6886503100395203, "avg_line_length": 17.11111068725586, "blob_id": "5e53bafbaec68f7914cbdf133e132915317f5746", "content_id": "4b70cb00bca30ae03c5d176eda6d5952588dda38", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "permissive", "max_line_length": 78, "num_lines": 36, "path": "/src/aleph_client/exceptions.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "from abc import ABC\n\n\nclass QueryError(ABC, ValueError):\n \"\"\"The result of an API query is inconsistent.\"\"\"\n\n pass\n\n\nclass MessageNotFoundError(QueryError):\n \"\"\"A message was expected but could not be found.\"\"\"\n\n pass\n\n\nclass MultipleMessagesError(QueryError):\n \"\"\"Multiple messages were found when a single message is expected.\"\"\"\n\n pass\n\n\nclass BroadcastError(Exception):\n \"\"\"\n Data could not be broadcast to the aleph.im network.\n \"\"\"\n\n pass\n\n\nclass InvalidMessageError(BroadcastError):\n \"\"\"\n The message could not be broadcast because it does not follow the aleph.im\n message specification.\n \"\"\"\n\n pass\n" }, { "alpha_fraction": 0.6857143044471741, "alphanum_fraction": 0.6857143044471741, "avg_line_length": 16.5, "blob_id": "0e640ce6465d34324fa70e7c25568bc9bffdb453", "content_id": "ab3a8f755749d9383a7af09bbf318bc1a6f74c18", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 35, "license_type": "permissive", "max_line_length": 25, "num_lines": 2, "path": "/tests/integration/pytest.ini", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "[pytest]\npython_files = itest_*.py\n" }, { "alpha_fraction": 0.658703088760376, "alphanum_fraction": 0.6973834037780762, "avg_line_length": 20.975000381469727, "blob_id": "934478d6ea5d2a5f4afdca256678b883197b7bcc", "content_id": "d405e21db1607e00371ff2154eade2eb5806fd31", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 879, "license_type": "permissive", "max_line_length": 87, "num_lines": 40, "path": "/docker/with-ipfs.dockerfile", "repo_name": "aleph-im/aleph-client", "src_encoding": 
"UTF-8", "text": "FROM python:3.9\n\n# === Install IPFS ===\nRUN apt-get install -y wget\nRUN wget https://ipfs.io/ipns/dist.ipfs.io/kubo/v0.15.0/kubo_v0.15.0_linux-amd64.tar.gz\nRUN tar -xvzf kubo_v0.15.0_linux-amd64.tar.gz -C /opt/\nRUN ln -s /opt/kubo/ipfs /usr/local/bin/\n\n# Volume to store IPFS data\nRUN mkdir /var/lib/ipfs\nENV IPFS_PATH /var/lib/ipfs\nVOLUME /var/lib/ipfs\n\n# IPFS Swarm\nEXPOSE 4001\n# IPFS WebUI\nEXPOSE 5001\n# IPFS Gateway\nEXPOSE 8080\n\n\n# === Install Aleph-Client ===\n\nRUN apt-get update && apt-get -y upgrade && apt-get install -y \\\n libsecp256k1-dev \\\n && rm -rf /var/lib/apt/lists/*\n\nRUN mkdir /opt/aleph-client/\nWORKDIR /opt/aleph-client/\nCOPY . .\n\nRUN pip install -e .[testing,ethereum]\n\n\n# - User 'aleph' to run the code itself\nRUN useradd --create-home -s /bin/bash aleph\nWORKDIR /home/aleph\n\nCOPY docker/with-ipfs.entrypoint.sh /entrypoint.sh\nCMD [\"/entrypoint.sh\"]\n" }, { "alpha_fraction": 0.7562604546546936, "alphanum_fraction": 0.7562604546546936, "avg_line_length": 32.27777862548828, "blob_id": "d5df55ebf00a7f69bf4623d894ac8505d4be45c6", "content_id": "0671bf24d8180460e47c3dbb8c29b90c78f56ae8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 599, "license_type": "permissive", "max_line_length": 76, "num_lines": 18, "path": "/tests/unit/test_utils.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "from aleph_message.models import (\n AggregateMessage,\n ForgetMessage,\n MessageType,\n PostMessage,\n ProgramMessage,\n StoreMessage,\n)\n\nfrom aleph_client.utils import get_message_type_value\n\n\ndef test_get_message_type_value():\n assert get_message_type_value(PostMessage) == MessageType.post\n assert get_message_type_value(AggregateMessage) == MessageType.aggregate\n assert get_message_type_value(StoreMessage) == MessageType.store\n assert get_message_type_value(ProgramMessage) == MessageType.program\n assert get_message_type_value(ForgetMessage) == MessageType.forget\n" }, { "alpha_fraction": 0.6785714030265808, "alphanum_fraction": 0.6913265585899353, "avg_line_length": 17.66666603088379, "blob_id": "4d8c7acfb34b5b596f45893cbfbf06f150069257", "content_id": "d3c5c12bb5300cba326e878551975d3d50f9b305", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 392, "license_type": "permissive", "max_line_length": 60, "num_lines": 21, "path": "/scripts/build-to-publish.sh", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nset -euf\n\n# Use Podman if installed, else use Docker\nif hash podman 2> /dev/null\nthen\n DOCKER_COMMAND=podman\nelse\n DOCKER_COMMAND=docker\nfi\n\nmkdir -p ./dist\nchmod 0777 ./dist\n\n$DOCKER_COMMAND build -t aleph-client -f docker/Dockerfile .\n$DOCKER_COMMAND run -ti --rm \\\n -w /opt/aleph-client \\\n -v \"$(pwd)/dist\":/opt/aleph-client/dist \\\n --entrypoint /bin/bash \\\n aleph-client\n" }, { "alpha_fraction": 0.4020618498325348, "alphanum_fraction": 0.4020618498325348, "avg_line_length": 12.857142448425293, "blob_id": "c894fd8d7a40a833ebe476e1fd414b4777d27d7f", "content_id": "a7c7459abfc74b8694e6f463f0b103e44bd53897", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 97, "license_type": "permissive", "max_line_length": 18, "num_lines": 7, "path": "/AUTHORS.rst", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": 
"============\nContributors\n============\n\n* Henry Taieb <[email protected]>\n* Hugo Herter <[email protected]>\n* Moshe Malawach <[email protected]>\n" }, { "alpha_fraction": 0.7327459454536438, "alphanum_fraction": 0.7334801554679871, "avg_line_length": 30.674419403076172, "blob_id": "8d79143ccb72fcd8f5d312d8e9bfb0fe5ca6b6f3", "content_id": "9f1b4c2183d87c20c91b4580c4b29268300e040c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1362, "license_type": "permissive", "max_line_length": 77, "num_lines": 43, "path": "/tests/unit/conftest.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n Dummy conftest.py for aleph_client.\n\n If you don't know what this is for, just leave it empty.\n Read more about conftest.py under:\n https://pytest.org/latest/plugins.html\n\"\"\"\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\n\nimport aleph.sdk.chains.ethereum as ethereum\nimport aleph.sdk.chains.sol as solana\nimport aleph.sdk.chains.tezos as tezos\nimport pytest\nfrom aleph.sdk.chains.common import get_fallback_private_key\n\n\[email protected]\ndef fallback_private_key() -> bytes:\n with NamedTemporaryFile() as private_key_file:\n yield get_fallback_private_key(path=Path(private_key_file.name))\n\n\[email protected]\ndef ethereum_account() -> ethereum.ETHAccount:\n with NamedTemporaryFile(delete=False) as private_key_file:\n private_key_file.close()\n yield ethereum.get_fallback_account(path=Path(private_key_file.name))\n\n\[email protected]\ndef solana_account() -> solana.SOLAccount:\n with NamedTemporaryFile(delete=False) as private_key_file:\n private_key_file.close()\n yield solana.get_fallback_account(path=Path(private_key_file.name))\n\n\[email protected]\ndef tezos_account() -> tezos.TezosAccount:\n with NamedTemporaryFile(delete=False) as private_key_file:\n private_key_file.close()\n yield tezos.get_fallback_account(path=Path(private_key_file.name))\n" }, { "alpha_fraction": 0.637895941734314, "alphanum_fraction": 0.6413832902908325, "avg_line_length": 33.40999984741211, "blob_id": "6d617040725da8491e380e1f0813ee48dc0e0f93", "content_id": "4c14c9caab03c6b2b0dd87848e7a869058555204", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3441, "license_type": "permissive", "max_line_length": 83, "num_lines": 100, "path": "/src/aleph_client/commands/files.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import logging\nfrom pathlib import Path\nfrom typing import Optional\n\nimport typer\nfrom aleph.sdk import AuthenticatedAlephClient\nfrom aleph.sdk.account import _load_account\nfrom aleph.sdk.conf import settings as sdk_settings\nfrom aleph.sdk.types import AccountFromPrivateKey, StorageEnum\nfrom aleph_message.models import StoreMessage\nfrom aleph_message.status import MessageStatus\n\nfrom aleph_client.commands import help_strings\nfrom aleph_client.commands.utils import setup_logging\n\nlogger = logging.getLogger(__name__)\napp = typer.Typer()\n\n\[email protected]()\ndef pin(\n item_hash: str = typer.Argument(..., help=\"IPFS hash to pin on aleph.im\"),\n channel: Optional[str] = typer.Option(default=None, help=help_strings.CHANNEL),\n private_key: Optional[str] = typer.Option(\n sdk_settings.PRIVATE_KEY_STRING, help=help_strings.PRIVATE_KEY\n ),\n private_key_file: Optional[Path] = typer.Option(\n sdk_settings.PRIVATE_KEY_FILE, 
help=help_strings.PRIVATE_KEY_FILE\n ),\n ref: Optional[str] = typer.Option(None, help=help_strings.REF),\n debug: bool = False,\n):\n \"\"\"Persist a file from IPFS on aleph.im.\"\"\"\n\n setup_logging(debug)\n\n account: AccountFromPrivateKey = _load_account(private_key, private_key_file)\n\n with AuthenticatedAlephClient(\n account=account, api_server=sdk_settings.API_HOST\n ) as client:\n result: StoreMessage\n status: MessageStatus\n result, status = client.create_store(\n file_hash=item_hash,\n storage_engine=StorageEnum.ipfs,\n channel=channel,\n ref=ref,\n )\n logger.debug(\"Upload finished\")\n typer.echo(f\"{result.json(indent=4)}\")\n\n\[email protected]()\ndef upload(\n path: Path = typer.Argument(..., help=\"Path of the file to upload\"),\n channel: Optional[str] = typer.Option(default=None, help=help_strings.CHANNEL),\n private_key: Optional[str] = typer.Option(\n sdk_settings.PRIVATE_KEY_STRING, help=help_strings.PRIVATE_KEY\n ),\n private_key_file: Optional[Path] = typer.Option(\n sdk_settings.PRIVATE_KEY_FILE, help=help_strings.PRIVATE_KEY_FILE\n ),\n ref: Optional[str] = typer.Option(None, help=help_strings.REF),\n debug: bool = False,\n):\n \"\"\"Upload and store a file on aleph.im.\"\"\"\n\n setup_logging(debug)\n\n account: AccountFromPrivateKey = _load_account(private_key, private_key_file)\n\n with AuthenticatedAlephClient(\n account=account, api_server=sdk_settings.API_HOST\n ) as client:\n if not path.is_file():\n typer.echo(f\"Error: File not found: '{path}'\")\n raise typer.Exit(code=1)\n\n with open(path, \"rb\") as fd:\n logger.debug(\"Reading file\")\n # TODO: Read in lazy mode instead of copying everything in memory\n file_content = fd.read()\n storage_engine = (\n StorageEnum.ipfs\n if len(file_content) > 4 * 1024 * 1024\n else StorageEnum.storage\n )\n logger.debug(\"Uploading file\")\n result: StoreMessage\n status: MessageStatus\n result, status = client.create_store(\n file_content=file_content,\n storage_engine=storage_engine,\n channel=channel,\n guess_mime_type=True,\n ref=ref,\n )\n logger.debug(\"Upload finished\")\n typer.echo(f\"{result.json(indent=4)}\")\n" }, { "alpha_fraction": 0.5699481964111328, "alphanum_fraction": 0.6165803074836731, "avg_line_length": 33.803279876708984, "blob_id": "10bd65a056de885c13fca68aec21b458c7a948f0", "content_id": "4d84cd1d150b64b5d28c30fa42f4345f80f54f1c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2123, "license_type": "permissive", "max_line_length": 85, "num_lines": 61, "path": "/tests/unit/test_remote_account.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "from unittest.mock import patch\n\nimport pytest\nfrom aleph.sdk.chains.ethereum import ETHAccount\nfrom aleph.sdk.chains.remote import AccountProperties, RemoteAccount\n\n\[email protected]\nasync def test_remote_storage():\n host = \"http://localhost:8888\"\n private_key = (\n b\"xRR\\xd4P\\xdb9\\x93(U\\xa7\\xd5\\x81\\xba\\xc7\\x9fiT\"\n b\"\\xb8]\\x12\\x82 \\xd1\\x81\\xc8\\x94\\xf0\\xdav\\xbb\\xfb\"\n )\n local_account = ETHAccount(private_key=private_key)\n\n with patch(\"aiohttp.client.ClientSession\") as mock_session:\n mock_session.get.return_value.__aenter__.return_value.json.return_value = (\n AccountProperties(\n chain=\"ETH\",\n curve=\"secp256k1\",\n address=local_account.get_address(),\n public_key=local_account.get_public_key(),\n ).dict()\n )\n\n remote_account = await RemoteAccount.from_crypto_host(\n host=host, session=mock_session\n )\n\n 
assert remote_account.get_address() == local_account.get_address()\n assert remote_account.get_public_key() == local_account.get_public_key()\n\n # --- Test remote signing ---\n\n expected_signature = (\n \"0xa943de6c550ddf9cd1d3e58e77e9952b9f97e1bcb2c69\"\n \"a2f4ee56446dc8a38f02fb4a4e85c2d02efa26750456090\"\n \"3b983b4eef8b8030cc0d89550c18c69aef081c\"\n )\n message = {\n \"chain\": \"ETH\",\n \"sender\": local_account.get_address(),\n \"type\": \"POST\",\n \"item_hash\": \"HASH\",\n }\n expected_signed_message = {\n \"signature\": expected_signature,\n }\n expected_signed_message.update(message)\n mock_session.post.return_value.__aenter__.return_value.json.return_value = (\n expected_signed_message\n )\n\n signed_message = await remote_account.sign_message(message)\n\n assert set(signed_message.keys()) == set(message.keys()).union([\"signature\"])\n assert signed_message[\"signature\"] == expected_signature\n\n local_signed_message = await local_account.sign_message(message)\n assert signed_message == local_signed_message\n" }, { "alpha_fraction": 0.6505787372589111, "alphanum_fraction": 0.6704520583152771, "avg_line_length": 32.91851806640625, "blob_id": "831753531d1990ebc756048edeabaafde4adc8a0", "content_id": "d2d07bc3aa41e74b87b7f5411e2abe61c12aabca", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4579, "license_type": "permissive", "max_line_length": 89, "num_lines": 135, "path": "/tests/unit/test_chain_nuls1_compat.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "\"\"\"The NULS1 implementation switched from lib `secp256k1` to `coincurve`.\n\nThis file tests that both implementations returns identical results.\n\"\"\"\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\n\nimport pytest\nimport secp256k1\nfrom aleph.sdk.chains.common import get_fallback_private_key\nfrom aleph.sdk.chains.nuls1 import LOGGER, MESSAGE_TEMPLATE, NulsSignature, VarInt\nfrom coincurve.keys import PrivateKey\n\nSECRET = (\n b\"\\xc4\\xfe\\xe65\\x96\\x14\\xb4:\\r: \\x05;\\x12j\\x9bJ\"\n b\"\\x14\\x0eY\\xe3BY\\x0f\\xd6\\xee\\xfc\\x9d\\xfe\\x8fv\\xbc\"\n)\n\n\nclass NulsSignatureSecp256k1(NulsSignature):\n @classmethod\n def sign_data_deprecated(cls, pri_key: bytes, digest_bytes: bytes):\n # TODO: Test compatibility and remove\n privkey = secp256k1.PrivateKey(\n pri_key, raw=True\n ) # we expect to have a private key as bytes. unhexlify it before passing.\n item = cls()\n item.pub_key = privkey.pubkey.serialize()\n item.digest_bytes = digest_bytes\n sig_check = privkey.ecdsa_sign(digest_bytes, raw=True)\n print(\"sig_check\", sig_check)\n item.sig_ser = privkey.ecdsa_serialize(sig_check)\n return item\n\n @classmethod\n def sign_message_deprecated(cls, pri_key: bytes, message):\n # TODO: Test compatibility and remove\n # we expect to have a private key as bytes. 
unhexlify it before passing\n privkey = secp256k1.PrivateKey(pri_key, raw=True)\n item = cls()\n message = VarInt(len(message)).encode() + message\n item.pub_key = privkey.pubkey.serialize()\n # item.digest_bytes = digest_bytes\n sig_check = privkey.ecdsa_sign(MESSAGE_TEMPLATE.format(message).encode())\n item.sig_ser = privkey.ecdsa_serialize(sig_check)\n return item\n\n def verify_deprecated(self, message):\n pub = secp256k1.PublicKey(self.pub_key, raw=True)\n message = VarInt(len(message)).encode() + message\n print(\"message\", message)\n # LOGGER.debug(\"Comparing with %r\" % (MESSAGE_TEMPLATE.format(message).encode()))\n try:\n sig_raw = pub.ecdsa_deserialize(self.sig_ser)\n good = pub.ecdsa_verify(MESSAGE_TEMPLATE.format(message).encode(), sig_raw)\n except Exception:\n LOGGER.exception(\"Verification failed\")\n good = False\n return good\n\n\ndef test_sign_data_deprecated():\n \"\"\"Test the data signature\"\"\"\n data = None\n signature = NulsSignature(data=data)\n\n with NamedTemporaryFile() as private_key_file:\n private_key = get_fallback_private_key(path=Path(private_key_file.name))\n\n assert signature\n sign_deprecated: NulsSignatureSecp256k1 = (\n NulsSignatureSecp256k1.sign_data_deprecated(\n pri_key=private_key, digest_bytes=b\"x\" * (256 // 8)\n )\n )\n assert sign_deprecated\n\n\[email protected]\nasync def test_compare_sign_data():\n private_key = PrivateKey(SECRET)\n\n sign: NulsSignature = NulsSignature.sign_data(\n pri_key=private_key.secret, digest_bytes=b\"x\" * (256 // 8)\n )\n\n sign_deprecated: NulsSignatureSecp256k1 = (\n NulsSignatureSecp256k1.sign_data_deprecated(\n pri_key=private_key.secret, digest_bytes=b\"x\" * (256 // 8)\n )\n )\n\n assert sign.sig_ser is not None\n assert sign_deprecated.sig_ser is not None\n assert len(sign.sig_ser) == len(sign_deprecated.sig_ser)\n assert sign.sig_ser == sign_deprecated.sig_ser\n assert sign == sign_deprecated\n\n\[email protected]\nasync def test_compare_sign_message():\n private_key = PrivateKey(SECRET)\n message = b\"GOOD\"\n\n sign: NulsSignature = await NulsSignature.sign_message(\n pri_key=private_key.secret, message=message\n )\n\n sign_deprecated: NulsSignatureSecp256k1 = (\n NulsSignatureSecp256k1.sign_message_deprecated(\n pri_key=private_key.secret, message=message\n )\n )\n\n assert sign.sig_ser is not None\n assert sign_deprecated.sig_ser is not None\n assert len(sign.sig_ser) == len(sign_deprecated.sig_ser)\n assert sign.sig_ser == sign_deprecated.sig_ser\n assert sign == sign_deprecated\n\n\[email protected]\nasync def test_verify():\n private_key = PrivateKey(SECRET)\n message = b\"GOOD\"\n\n sign: NulsSignatureSecp256k1 = await NulsSignatureSecp256k1.sign_message(\n pri_key=private_key.secret, message=message\n )\n\n assert sign.verify(message=message)\n assert not sign.verify(message=b\"BAD\")\n\n assert sign.verify_deprecated(message=message)\n assert not sign.verify_deprecated(message=b\"BAD\")\n" }, { "alpha_fraction": 0.7169973850250244, "alphanum_fraction": 0.7204486727714539, "avg_line_length": 27.975000381469727, "blob_id": "0faa99c09ab6b08dcdddeb497cddf06ea2dd7236", "content_id": "e5e9342062c49ff8195e0f8f9a8b57c8073b0cd9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1159, "license_type": "permissive", "max_line_length": 94, "num_lines": 40, "path": "/docs/content/programs.rst", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": ".. 
_programs:\n\n========\nPrograms\n========\n\nPrograms are special entries that define code to run on Aleph.im virtual machines.\n\nAleph.im currently supports programs written in Python that follow the\n`ASGI interface <https://asgi.readthedocs.io/en/latest/introduction.html>`_.\n\nIn practice, the easiest approach is to use an\n`ASGI compatible web framework <https://asgi.readthedocs.io/en/latest/implementations.html>`_,\nsuch as\n`FastAPI <https://fastapi.tiangolo.com/>`_ or\n`Django <https://www.djangoproject.com/>`_.\n\nCreating a program\n------------------\n\nFollow the `FastAPI Tutorial <https://fastapi.tiangolo.com/tutorial/>`_\nto create your first program and test it using uvicorn.\n\nRunning on Aleph.im\n-------------------\n\nUse the :ref:`cli` to upload your program.\n\nIn this example, we will upload the\n`example_fastapi_2 example from Aleph-VM\n<https://github.com/aleph-im/aleph-vm/tree/main/examples/example_fastapi_2>`_.\n\n.. code-block:: bash\n\n python3 -m aleph_client program /tmp/aleph-vm/examples/example_fastapi_2 __init__:app\n\nThe command will output two URLs:\n\n- A URL link to see the message definition of your program\n- A URL to run your program\n" }, { "alpha_fraction": 0.5956341028213501, "alphanum_fraction": 0.5977131128311157, "avg_line_length": 19.46808433532715, "blob_id": "b1ff1cd99cfb372d1bdbc1c32e3eb3c5ae2e5b65", "content_id": "e805e7d72d9506b5d50fa3d2e78205d00095655e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1924, "license_type": "permissive", "max_line_length": 80, "num_lines": 94, "path": "/docs/content/cli.rst", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": ".. _cli:\n\n======================\nCommand-line Interface\n======================\n\nAleph-client can be used as a command-line interface to some Aleph.im\nfunctionalities.\n\nThe following commands are available:\n\nPost\n----\n\nPost a message on Aleph.im.\n\nThe content must be JSON encoded and is obtained either from a file\nor from a user prompt.\n\n.. code-block:: bash\n\n python3 -m aleph_client post [OPTIONS]\n\n Post a message on Aleph.im.\n\n Options:\n --path TEXT\n --type TEXT [default: test]\n --channel TEXT [default: TEST]\n --private-key TEXT\n --private-key-file TEXT\n --help Show this message and exit.\n\n\nUpload\n------\n\nUpload and store a file on Aleph.im.\n\n.. code-block:: bash\n\n python3 -m aleph_client upload [OPTIONS] PATH\n\n Upload and store a file on Aleph.im.\n\n Arguments:\n PATH [required]\n\n Options:\n --channel TEXT [default: TEST]\n --private-key TEXT\n --private-key-file TEXT\n --help Show this message and exit.\n\nPin\n---\n\nPersist a file from IPFS on Aleph.im.\n\n.. code-block:: bash\n\n python3 -m aleph_client pin [OPTIONS] HASH\n\n Persist a file from IPFS on Aleph.im.\n\n Arguments:\n HASH [required]\n\n Options:\n --channel TEXT [default: TEST]\n --private-key TEXT\n --private-key-file TEXT\n --help Show this message and exit.\n\nProgram\n-------\n\nRegister a program to run on Aleph.im virtual machines from a zip archive.\n\n.. 
code-block:: bash\n\n python3 -m aleph_client program [OPTIONS] PATH ENTRYPOINT\n\n Register a program to run on Aleph.im virtual machines from a zip archive.\n\n Arguments:\n PATH [required]\n ENTRYPOINT [required]\n\n Options:\n --channel TEXT [default: TEST]\n --private-key TEXT\n --private-key-file TEXT\n --help Show this message and exit.\n" }, { "alpha_fraction": 0.697050929069519, "alphanum_fraction": 0.711796224117279, "avg_line_length": 30.08333396911621, "blob_id": "6b6c1c72302101f956ff816cc565bd6756a836a2", "content_id": "5f8874f6fc7c1d9d0bc2c408adefcb2c90cb0bd7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2238, "license_type": "permissive", "max_line_length": 101, "num_lines": 72, "path": "/tests/unit/test_chain_solana.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import json\nfrom dataclasses import asdict, dataclass\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\n\nimport base58\nimport pytest\nfrom aleph.sdk.chains.common import get_verification_buffer\nfrom aleph.sdk.chains.sol import SOLAccount, get_fallback_account\nfrom nacl.signing import VerifyKey\n\n\n@dataclass\nclass Message:\n chain: str\n sender: str\n type: str\n item_hash: str\n\n\ndef test_get_fallback_account():\n with NamedTemporaryFile() as private_key_file:\n account: SOLAccount = get_fallback_account(path=Path(private_key_file.name))\n\n assert account.CHAIN == \"SOL\"\n assert account.CURVE == \"curve25519\"\n assert account._signing_key.verify_key\n assert type(account.private_key) == bytes\n assert len(account.private_key) == 32\n\n\[email protected]\nasync def test_SOLAccount(solana_account):\n message = asdict(\n Message(\"SOL\", solana_account.get_address(), \"SomeType\", \"ItemHash\")\n )\n initial_message = message.copy()\n await solana_account.sign_message(message)\n assert message[\"signature\"]\n\n address = message[\"sender\"]\n assert address\n assert type(address) == str\n # assert len(address) == 44 # can also be 43?\n signature = json.loads(message[\"signature\"])\n\n pubkey = base58.b58decode(signature[\"publicKey\"])\n assert type(pubkey) == bytes\n assert len(pubkey) == 32\n\n # modeled according to https://github.com/aleph-im/pyaleph/blob/master/src/aleph/chains/solana.py\n verify_key = VerifyKey(pubkey)\n verification_buffer = get_verification_buffer(message)\n assert get_verification_buffer(initial_message) == verification_buffer\n verif = verify_key.verify(\n verification_buffer, signature=base58.b58decode(signature[\"signature\"])\n )\n\n assert verif == verification_buffer\n assert message[\"sender\"] == signature[\"publicKey\"]\n\n\[email protected]\nasync def test_decrypt_curve25516(solana_account):\n assert solana_account.CURVE == \"curve25519\"\n content = b\"SomeContent\"\n\n encrypted = await solana_account.encrypt(content)\n assert type(encrypted) == bytes\n decrypted = await solana_account.decrypt(encrypted)\n assert type(decrypted) == bytes\n assert content == decrypted\n" }, { "alpha_fraction": 0.7578616142272949, "alphanum_fraction": 0.7578616142272949, "avg_line_length": 62.599998474121094, "blob_id": "de66d79987c90d4e1bf636c1f7d26307cc8b0c5c", "content_id": "53773cf7d38d9496a8f4d5c337171f3d731545ec", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 318, "license_type": "permissive", "max_line_length": 83, "num_lines": 5, "path": "/src/aleph_client/commands/help_strings.py", "repo_name": 
"aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "IPFS_HASH = \"IPFS Content identifier (CID)\"\nCHANNEL = \"Aleph.im network channel where the message is located\"\nPRIVATE_KEY = \"Your private key. Cannot be used with --private-key-file\"\nPRIVATE_KEY_FILE = \"Path to your private key file\"\nREF = \"Checkout https://aleph-im.gitbook.io/aleph-js/api-resources-reference/posts\"\n" }, { "alpha_fraction": 0.7088444232940674, "alphanum_fraction": 0.7211104035377502, "avg_line_length": 28.226415634155273, "blob_id": "b5baf9de055504d8ad61704d608ac6cb67b1610a", "content_id": "fe4e671bcd4bd5fa5882a29dd0a842026df153c1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1549, "license_type": "permissive", "max_line_length": 86, "num_lines": 53, "path": "/tests/unit/test_chain_tezos.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "from dataclasses import asdict, dataclass\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\n\nimport pytest\nfrom aleph.sdk.chains.tezos import TezosAccount, get_fallback_account\n\n\n@dataclass\nclass Message:\n chain: str\n sender: str\n type: str\n item_hash: str\n\n\ndef test_get_fallback_account(tezos_account: TezosAccount):\n with NamedTemporaryFile() as private_key_file:\n account: TezosAccount = get_fallback_account(path=Path(private_key_file.name))\n\n assert account.CHAIN == \"TEZOS\"\n assert account.CURVE == \"secp256k1\"\n assert account._account.public_key()\n\n\[email protected]\nasync def test_tezos_account(tezos_account: TezosAccount):\n\n message = Message(\"TEZOS\", tezos_account.get_address(), \"SomeType\", \"ItemHash\")\n signed = await tezos_account.sign_message(asdict(message))\n assert signed[\"signature\"]\n assert len(signed[\"signature\"]) == 188\n\n address = tezos_account.get_address()\n assert address is not None\n assert isinstance(address, str)\n assert len(address) == 36\n\n pubkey = tezos_account.get_public_key()\n assert isinstance(pubkey, str)\n assert len(pubkey) == 55\n\n\[email protected]\nasync def test_decrypt_secp256k1(tezos_account: TezosAccount):\n assert tezos_account.CURVE == \"secp256k1\"\n content = b\"SomeContent\"\n\n encrypted = await tezos_account.encrypt(content)\n assert isinstance(encrypted, bytes)\n decrypted = await tezos_account.decrypt(encrypted)\n assert isinstance(decrypted, bytes)\n assert content == decrypted\n" }, { "alpha_fraction": 0.680497944355011, "alphanum_fraction": 0.6820539236068726, "avg_line_length": 31.67796516418457, "blob_id": "732d25dcc97dce614162dc6167d3bf4ce7251191", "content_id": "8f13cc31b6e16374e101b9d8de315d1650879d51", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1928, "license_type": "permissive", "max_line_length": 103, "num_lines": 59, "path": "/tests/integration/itest_posts.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import pytest\nfrom aleph_message.models import PostMessage\nfrom aleph.sdk.exceptions import MessageNotFoundError\nfrom tests.integration.toolkit import try_until\nfrom .config import REFERENCE_NODE, TARGET_NODE\nfrom aleph.sdk import AuthenticatedAlephClient\nfrom aleph.sdk.conf import settings as sdk_settings\nfrom aleph.sdk import AlephClient\nfrom aleph_message.status import MessageStatus\n\n\nasync def get_message(item_hash: str):\n async with AlephClient(api_server=sdk_settings.API_HOST) as client:\n try:\n response = await 
client.get_message(item_hash, message_type=PostMessage)\n return response\n except MessageNotFoundError:\n return None\n\n\nasync def create_message_on_target(\n fixture_account, emitter_node: str, receiver_node: str\n):\n \"\"\"\n Create a POST message on the target node, then fetch it from the reference node.\n \"\"\"\n data = {\"content\": \"test\"}\n async with AuthenticatedAlephClient(\n account=fixture_account, api_server=sdk_settings.API_HOST\n ) as client:\n message, status = await client.create_post(\n post_content=data,\n post_type=\"POST\",\n ref=None,\n channel=\"INTEGRATION_TESTS\",\n inline=True,\n sync=True,\n )\n\n response = await try_until(\n get_message,\n lambda r: r is not None and r.content is not None,\n timeout=5,\n time_between_attempts=0.5,\n item_hash=message.item_hash,\n )\n assert status == MessageStatus.PROCESSED\n assert response.content == message.content\n\n\[email protected]\nasync def test_create_message_on_target(fixture_account):\n \"\"\"\n Attempts to create a new message on the target node and verifies if the message can be fetched from\n the reference node.\n \"\"\"\n await create_message_on_target(\n fixture_account, emitter_node=REFERENCE_NODE, receiver_node=TARGET_NODE\n )\n" }, { "alpha_fraction": 0.5867680907249451, "alphanum_fraction": 0.5949938297271729, "avg_line_length": 36.56146240234375, "blob_id": "9df74cd18be5b0910e6c425561b7cfee162de7f7", "content_id": "82e9a0c4861bdee0add99a61d44a62b326394e37", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11306, "license_type": "permissive", "max_line_length": 159, "num_lines": 301, "path": "/src/aleph_client/commands/program.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import json\nimport logging\nfrom base64 import b16decode, b32encode\nfrom pathlib import Path\nfrom typing import Dict, List, Optional\nfrom zipfile import BadZipFile\n\nimport typer\nfrom aleph.sdk import AuthenticatedAlephClient\nfrom aleph.sdk.account import _load_account\nfrom aleph.sdk.conf import settings as sdk_settings\nfrom aleph.sdk.types import AccountFromPrivateKey, StorageEnum\nfrom aleph_message.models import (\n ItemHash,\n MessagesResponse,\n ProgramContent,\n ProgramMessage,\n StoreMessage,\n)\nfrom aleph_message.status import MessageStatus\n\nfrom aleph_client.commands import help_strings\nfrom aleph_client.commands.utils import (\n input_multiline,\n prompt_for_volumes,\n setup_logging,\n volume_to_dict,\n yes_no_input,\n)\nfrom aleph_client.conf import settings\nfrom aleph_client.utils import create_archive\n\nlogger = logging.getLogger(__name__)\napp = typer.Typer()\n\n\[email protected]()\ndef upload(\n path: Path = typer.Argument(..., help=\"Path to your source code\"),\n entrypoint: str = typer.Argument(..., help=\"Your program entrypoint\"),\n channel: Optional[str] = typer.Option(default=None, help=help_strings.CHANNEL),\n memory: int = typer.Option(\n sdk_settings.DEFAULT_VM_MEMORY, help=\"Maximum memory allocation on vm in MiB\"\n ),\n vcpus: int = typer.Option(\n sdk_settings.DEFAULT_VM_VCPUS, help=\"Number of virtual cpus to allocate.\"\n ),\n timeout_seconds: float = typer.Option(\n sdk_settings.DEFAULT_VM_TIMEOUT,\n help=\"If vm is not called after [timeout_seconds] it will shutdown\",\n ),\n private_key: Optional[str] = typer.Option(\n sdk_settings.PRIVATE_KEY_STRING, help=help_strings.PRIVATE_KEY\n ),\n private_key_file: Optional[Path] = typer.Option(\n sdk_settings.PRIVATE_KEY_FILE, 
help=help_strings.PRIVATE_KEY_FILE\n ),\n print_messages: bool = typer.Option(False),\n print_code_message: bool = typer.Option(False),\n print_program_message: bool = typer.Option(False),\n runtime: str = typer.Option(\n None,\n help=\"Hash of the runtime to use for your program. Defaults to the aleph Debian runtime with Python 3.8 and Node. You can also create your own runtime and pin it\",\n ),\n beta: bool = typer.Option(False),\n debug: bool = False,\n persistent: bool = False,\n persistent_volume: Optional[List[str]] = typer.Option(\n None,\n help=\"\"\"Takes 3 parameters \n A persistent volume is allocated on the host machine and persists across executions \n eg: Use , to separate the parameters, with no spaces \n --persistent-volume persistence=host,name=my-volume,size=100 ./my-program main:app\n \"\"\",\n ),\n ephemeral_volume: Optional[List[str]] = typer.Option(\n None,\n help=\"\"\"Takes 1 parameter only \n Ephemeral volumes can be moved or removed by the host (essentially garbage collected) when the VM isn't running \n eg: Use , to separate the parameters, with no spaces \n --ephemeral-volume size_mib=100 ./my-program main:app \"\"\",\n ),\n immutable_volume: Optional[List[str]] = typer.Option(\n None,\n help=\"\"\"Takes 3 parameters \n An immutable volume is one whose contents do not change \n eg: Use , to separate the parameters, with no spaces \n --immutable-volume ref=25a393222692c2f73489dc6710ae87605a96742ceef7b91de4d7ec34bb688d94,use_latest=true,mount=/mnt/volume ./my-program main:app\n \"\"\",\n ),\n):\n \"\"\"Register a program to run on aleph.im virtual machines from a zip archive.\"\"\"\n\n setup_logging(debug)\n\n path = path.absolute()\n\n try:\n path_object, encoding = create_archive(path)\n except BadZipFile:\n typer.echo(\"Invalid zip archive\")\n raise typer.Exit(3)\n except FileNotFoundError:\n typer.echo(\"No such file or directory\")\n raise typer.Exit(4)\n\n account: AccountFromPrivateKey = _load_account(private_key, private_key_file)\n\n runtime = (\n runtime\n or input(f\"Ref of runtime ? 
[{sdk_settings.DEFAULT_RUNTIME_ID}] \")\n or sdk_settings.DEFAULT_RUNTIME_ID\n )\n\n volumes = []\n\n # Check if the volumes are empty\n if (\n persistent_volume is None\n or ephemeral_volume is None\n or immutable_volume is None\n ):\n for volume in prompt_for_volumes():\n volumes.append(volume)\n typer.echo(\"\\n\")\n\n # else Parse all the volumes that have passed as the cli parameters and put it into volume list\n else:\n if len(persistent_volume) > 0:\n persistent_volume_dict = volume_to_dict(volume=persistent_volume)\n volumes.append(persistent_volume_dict)\n if len(ephemeral_volume) > 0:\n ephemeral_volume_dict = volume_to_dict(volume=ephemeral_volume)\n volumes.append(ephemeral_volume_dict)\n if len(immutable_volume) > 0:\n immutable_volume_dict = volume_to_dict(volume=immutable_volume)\n volumes.append(immutable_volume_dict)\n\n subscriptions: Optional[List[Dict]]\n if beta and yes_no_input(\"Subscribe to messages ?\", default=False):\n content_raw = input_multiline()\n try:\n subscriptions = json.loads(content_raw)\n except json.decoder.JSONDecodeError:\n typer.echo(\"Not valid JSON\")\n raise typer.Exit(code=2)\n else:\n subscriptions = None\n\n with AuthenticatedAlephClient(\n account=account, api_server=sdk_settings.API_HOST\n ) as client:\n # Upload the source code\n with open(path_object, \"rb\") as fd:\n logger.debug(\"Reading file\")\n # TODO: Read in lazy mode instead of copying everything in memory\n file_content = fd.read()\n storage_engine = (\n StorageEnum.ipfs\n if len(file_content) > 4 * 1024 * 1024\n else StorageEnum.storage\n )\n logger.debug(\"Uploading file\")\n user_code: StoreMessage\n status: MessageStatus\n user_code, status = client.create_store(\n file_content=file_content,\n storage_engine=storage_engine,\n channel=channel,\n guess_mime_type=True,\n ref=None,\n )\n logger.debug(\"Upload finished\")\n if print_messages or print_code_message:\n typer.echo(f\"{user_code.json(indent=4)}\")\n program_ref = user_code.item_hash\n\n # Register the program\n message, status = client.create_program(\n program_ref=program_ref,\n entrypoint=entrypoint,\n runtime=runtime,\n storage_engine=StorageEnum.storage,\n channel=channel,\n memory=memory,\n vcpus=vcpus,\n timeout_seconds=timeout_seconds,\n persistent=persistent,\n encoding=encoding,\n volumes=volumes,\n subscriptions=subscriptions,\n )\n logger.debug(\"Upload finished\")\n if print_messages or print_program_message:\n typer.echo(f\"{message.json(indent=4)}\")\n\n item_hash: ItemHash = message.item_hash\n hash_base32 = (\n b32encode(b16decode(item_hash.upper())).strip(b\"=\").lower().decode()\n )\n\n typer.echo(\n f\"Your program has been uploaded on aleph.im .\\n\\n\"\n \"Available on:\\n\"\n f\" {settings.VM_URL_PATH.format(hash=item_hash)}\\n\"\n f\" {settings.VM_URL_HOST.format(hash_base32=hash_base32)}\\n\"\n \"Visualise on:\\n https://explorer.aleph.im/address/\"\n f\"{message.chain}/{message.sender}/message/PROGRAM/{item_hash}\\n\"\n )\n\n\[email protected]()\ndef update(\n item_hash: str,\n path: Path,\n private_key: Optional[str] = sdk_settings.PRIVATE_KEY_STRING,\n private_key_file: Optional[Path] = sdk_settings.PRIVATE_KEY_FILE,\n print_message: bool = True,\n debug: bool = False,\n):\n \"\"\"Update the code of an existing program\"\"\"\n\n setup_logging(debug)\n\n account = _load_account(private_key, private_key_file)\n path = path.absolute()\n\n with AuthenticatedAlephClient(\n account=account, api_server=sdk_settings.API_HOST\n ) as client:\n program_message: ProgramMessage = client.get_message(\n 
item_hash=item_hash, message_type=ProgramMessage\n )\n code_ref = program_message.content.code.ref\n code_message: StoreMessage = client.get_message(\n item_hash=code_ref, message_type=StoreMessage\n )\n\n try:\n path, encoding = create_archive(path)\n except BadZipFile:\n typer.echo(\"Invalid zip archive\")\n raise typer.Exit(3)\n except FileNotFoundError:\n typer.echo(\"No such file or directory\")\n raise typer.Exit(4)\n\n if encoding != program_message.content.code.encoding:\n logger.error(\n f\"Code must be encoded with the same encoding as the previous version \"\n f\"('{encoding}' vs '{program_message.content.code.encoding}'\"\n )\n raise typer.Exit(1)\n\n # Upload the source code\n with open(path, \"rb\") as fd:\n logger.debug(\"Reading file\")\n # TODO: Read in lazy mode instead of copying everything in memory\n file_content = fd.read()\n logger.debug(\"Uploading file\")\n message, status = client.create_store(\n file_content=file_content,\n storage_engine=code_message.content.item_type,\n channel=code_message.channel,\n guess_mime_type=True,\n ref=code_message.item_hash,\n )\n logger.debug(\"Upload finished\")\n if print_message:\n typer.echo(f\"{message.json(indent=4)}\")\n\n\[email protected]()\ndef unpersist(\n item_hash: str,\n private_key: Optional[str] = sdk_settings.PRIVATE_KEY_STRING,\n private_key_file: Optional[Path] = sdk_settings.PRIVATE_KEY_FILE,\n debug: bool = False,\n):\n \"\"\"Stop a persistent virtual machine by making it non-persistent\"\"\"\n\n setup_logging(debug)\n\n account = _load_account(private_key, private_key_file)\n\n with AuthenticatedAlephClient(\n account=account, api_server=sdk_settings.API_HOST\n ) as client:\n existing: MessagesResponse = client.get_messages(hashes=[item_hash])\n message: ProgramMessage = existing.messages[0]\n content: ProgramContent = message.content.copy()\n\n content.on.persistent = False\n content.replaces = message.item_hash\n\n message, _status = client.submit(\n content=content.dict(exclude_none=True),\n message_type=message.type,\n channel=message.channel,\n )\n typer.echo(f\"{message.json(indent=4)}\")\n" }, { "alpha_fraction": 0.661066472530365, "alphanum_fraction": 0.6756756901741028, "avg_line_length": 23.89090919494629, "blob_id": "85f34feb1e32c6e760fb1152f2b55f1d3f909620", "content_id": "d5ef53b092bc3c2cdad9f457b1bb82c6f77a1787", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1369, "license_type": "permissive", "max_line_length": 66, "num_lines": 55, "path": "/tests/unit/test_chain_nuls1.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "import pytest\nfrom aleph.sdk.chains.nuls1 import NulsSignature\nfrom coincurve.keys import PrivateKey\n\nSECRET = (\n b\"\\xc4\\xfe\\xe65\\x96\\x14\\xb4:\\r: \\x05;\\x12j\\x9bJ\"\n b\"\\x14\\x0eY\\xe3BY\\x0f\\xd6\\xee\\xfc\\x9d\\xfe\\x8fv\\xbc\"\n)\n\n\[email protected]\nasync def test_sign_data():\n private_key = PrivateKey(SECRET)\n\n sign: NulsSignature = NulsSignature.sign_data(\n pri_key=private_key.secret, digest_bytes=b\"x\" * (256 // 8)\n )\n\n assert sign\n assert type(sign.pub_key) == bytes\n assert type(sign.digest_bytes) == bytes\n assert type(sign.sig_ser) == bytes\n assert sign.ecc_type == None\n\n\[email protected]\nasync def test_sign_message():\n private_key = PrivateKey(SECRET)\n message = b\"GOOD\"\n\n sign: NulsSignature = await NulsSignature.sign_message(\n pri_key=private_key.secret, message=message\n )\n\n assert sign.sig_ser is not None\n assert len(sign.sig_ser) == 
70\n\n assert sign\n assert type(sign.pub_key) == bytes\n assert sign.digest_bytes == None\n assert type(sign.sig_ser) == bytes\n assert sign.ecc_type == None\n\n\[email protected]\nasync def test_verify():\n private_key = PrivateKey(SECRET)\n message = b\"GOOD\"\n\n sign: NulsSignature = await NulsSignature.sign_message(\n pri_key=private_key.secret, message=message\n )\n\n assert sign.verify(message=message)\n assert not sign.verify(message=b\"BAD\")\n" }, { "alpha_fraction": 0.6846038699150085, "alphanum_fraction": 0.6846038699150085, "avg_line_length": 21.299999237060547, "blob_id": "cc6c1c010243c30c27a2250bfec25d098ba3bacb", "content_id": "3a414d0b3ea75a5ff0b506eac79ec745a4fdd0e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "permissive", "max_line_length": 82, "num_lines": 30, "path": "/src/aleph_client/__main__.py", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "\"\"\"\nAleph Client command-line interface.\n\"\"\"\n\nimport typer\n\nfrom .commands import account, aggregate, files, message, program\n\napp = typer.Typer()\n\napp.add_typer(account.app, name=\"account\", help=\"Manage account\")\napp.add_typer(\n aggregate.app, name=\"aggregate\", help=\"Manage aggregate messages on aleph.im\"\n)\n\napp.add_typer(\n files.app, name=\"file\", help=\"File uploading and pinning on IPFS and aleph.im\"\n)\napp.add_typer(\n message.app,\n name=\"message\",\n help=\"Post, amend, watch and forget messages on aleph.im\",\n)\napp.add_typer(\n program.app, name=\"program\", help=\"Upload and update programs on aleph.im VM\"\n)\n\n\nif __name__ == \"__main__\":\n app()\n" }, { "alpha_fraction": 0.707064151763916, "alphanum_fraction": 0.7232663631439209, "avg_line_length": 31.787233352661133, "blob_id": "7e6a4ec42683ea0ccf85b142be9a40c46cf60735", "content_id": "0598428765ed74108a68b0aff4fe8f80b8ba0f9f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1543, "license_type": "permissive", "max_line_length": 74, "num_lines": 47, "path": "/docs/content/async_notes.rst", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "=============\nAsync vs Sync\n=============\n\nAt aleph.im we really like coding with asyncio,\nusing the async/await construct on Python 3.\n\nThat being said, we totally understand that you might not\nhave the same opinion, or that you might not be in a position\nto use it.\n\nFor this reason, all the functions have an async version\nand a sync version. The sync versions actually call\nthe async code behind your back (sneaky!), so be careful if you\nare calling them in an environment where an asyncio loop is\nalready running.\n\nMost chain-specific code is synchronous, and core aleph.im interaction\nmight be async.\n\nSync code has to be imported from :py:mod:`aleph_client.synchronous`,\nasync code from :py:mod:`aleph_client.asynchronous`, with\nthe same function names.\n\naiohttp session\n---------------\n\nMost of the REST interface interaction code is based on aiohttp.\nFor simplicity's sake, if no aiohttp session is passed in,\nthe async functions needing one will instantiate it as a singleton\nand reuse it.\n\nThere are a lot of use cases where you might prefer to use your own session\ninstead. Most functions allow you to do so by passing a session arg.\n\nExample:\n\n.. 
code-block:: python3\n\n >>> import aiohttp\n >>> from aleph_client.asynchronous import fetch_aggregate\n >>> async with aiohttp.ClientSession() as session:\n ... await fetch_aggregate(\n ... \"0x06DE0C46884EbFF46558Cd1a9e7DA6B1c3E9D0a8\",\n ... \"profile\", session=session)\n ...\n {\"bio\": \"tester\", \"name\": \"Moshe on Ethereum\"} \n\n" }, { "alpha_fraction": 0.7369294762611389, "alphanum_fraction": 0.7510373592376709, "avg_line_length": 27.690475463867188, "blob_id": "72715459e2dc90c2c546f960b398001af76a5ac7", "content_id": "560c98af5c6252a0ce1332e62c01190c409127c5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1205, "license_type": "permissive", "max_line_length": 118, "num_lines": 42, "path": "/docker/Dockerfile", "repo_name": "aleph-im/aleph-client", "src_encoding": "UTF-8", "text": "FROM python:3.9-bullseye\nMAINTAINER The aleph.im project\n\nRUN apt-get update && apt-get -y upgrade && apt-get install -y \\\n libsecp256k1-dev \\\n && rm -rf /var/lib/apt/lists/*\n\nRUN useradd -s /bin/bash --create-home user\nRUN mkdir /opt/venv\nRUN mkdir /opt/aleph-client/\nRUN chown user:user /opt/venv\nRUN chown user:user /opt/aleph-client\n\nUSER user\nRUN python3.9 -m venv /opt/venv\nENV PATH=\"/opt/venv/bin:$PATH\"\n\nRUN pip install --upgrade pip wheel twine\n\n# Preinstall dependencies for faster steps\nRUN pip install --upgrade secp256k1 coincurve aiohttp eciespy python-magic typer\nRUN pip install --upgrade 'aleph-message~=0.3.2' eth_account pynacl base58\nRUN pip install --upgrade pytest pytest-cov pytest-asyncio mypy types-setuptools fastapi httpx requests\n\nWORKDIR /opt/aleph-client/\nCOPY . .\nUSER root\nRUN chown -R user:user /opt/aleph-client\n\nRUN git config --global --add safe.directory /opt/aleph-client\nRUN pip install -e .[testing,ethereum,solana,tezos]\n\nRUN mkdir /data\nRUN chown user:user /data\nENV ALEPH_PRIVATE_KEY_FILE=/data/secret.key\n\nWORKDIR /home/user\nUSER user\nRUN aleph --install-completion bash\nENTRYPOINT [\"aleph\"]\nCMD [\"--help\"]\n" } ]
52
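The aleph-client records above implement the same store-message flow in several places (`files.py` and `program.py`). As a reading aid, the sketch below condenses that flow into one standalone function. It assumes the `aleph.sdk` API exactly as used in those files; the `upload_file` helper itself is hypothetical and not part of the repository.

```python
# Minimal sketch of the upload flow shown in aleph_client/commands/files.py.
# Assumption: the aleph.sdk API matches the version used in the sources above.
from pathlib import Path

from aleph.sdk import AuthenticatedAlephClient
from aleph.sdk.account import _load_account
from aleph.sdk.conf import settings as sdk_settings
from aleph.sdk.types import StorageEnum


def upload_file(path: Path, channel: str = "TEST"):
    # Load the account the same way the CLI does (key string takes precedence
    # over the key file when both are configured).
    account = _load_account(
        sdk_settings.PRIVATE_KEY_STRING, sdk_settings.PRIVATE_KEY_FILE
    )
    file_content = path.read_bytes()
    # Same heuristic as the command: payloads over 4 MiB go to IPFS,
    # smaller ones to aleph.im native storage.
    storage_engine = (
        StorageEnum.ipfs
        if len(file_content) > 4 * 1024 * 1024
        else StorageEnum.storage
    )
    with AuthenticatedAlephClient(
        account=account, api_server=sdk_settings.API_HOST
    ) as client:
        message, status = client.create_store(
            file_content=file_content,
            storage_engine=storage_engine,
            channel=channel,
            guess_mime_type=True,
            ref=None,
        )
    return message, status
```

The 4 MiB cutoff is taken directly from the sources above, not invented here; it is the point at which the command switches from inline storage to IPFS.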
kianahs/Artificial-nueral-network
https://github.com/kianahs/Artificial-nueral-network
b3e3abd12afb015914db69f217ce47288f56e9d1
028329933e61a5460fbafe762de25033353565fa
56b33c905645609bf4ec184e3cccf654bf479720
refs/heads/main
2023-04-10T15:31:50.122683
2021-04-23T07:26:02
2021-04-23T07:26:02
360,798,672
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5724328756332397, "alphanum_fraction": 0.5938688516616821, "avg_line_length": 28.88612174987793, "blob_id": "fd3231de449484f08d618d606a144c72d709fac5", "content_id": "c8928a4a693abbb5e052b292a05123a8da610ed4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8677, "license_type": "no_license", "max_line_length": 148, "num_lines": 281, "path": "/part123.py", "repo_name": "kianahs/Artificial-nueral-network", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom numpy import random\r\n# import random\r\nimport time\r\n\r\nclass layer:\r\n\r\n k=0\r\n n=0\r\n number=0\r\n z=[]\r\n weights=[]\r\n bias=[]\r\n\r\n def __init__(self, n, k ,number):\r\n self.n = n\r\n self.k = k\r\n self.number=number\r\n self.weights = random.normal(size=(self.k, self.n))\r\n # self.z=np.zeros(shape=(self.k, 1))\r\n self.bias = np.zeros(shape=(self.k, 1))\r\n\r\n def get_weights(self):\r\n return self.weights\r\n\r\n def set_weights(self,updated_value):\r\n self.weights=updated_value\r\n\r\n def get_bias(self):\r\n return self.bias\r\n\r\n def set_bias(self,updated_value):\r\n self.bias=updated_value\r\n\r\n def calculate_next_nodes(self , present_a):\r\n\r\n self.z=np.add(np.matmul(self.weights , present_a),self.bias)\r\n result=sigmoid(self.z)\r\n return result\r\n\r\n\r\n def calculate_gradian_of_cost_W(self,new_a,output,present_a):\r\n\r\n grad_w = np.zeros(shape=(self.k ,self.n))\r\n\r\n if self.number==3:\r\n for j in range(self.k):\r\n for i in range(self.n):\r\n grad_w[j][i]= 2 * (new_a[j] - output[j]) * sigmoid_derivative(self.z[j]) * present_a[i]\r\n\r\n\r\n else:\r\n for j in range(self.k):\r\n for i in range(self.n):\r\n grad_w[j][i] = (output[j]) * sigmoid_derivative(self.z[j]) * present_a[i]\r\n\r\n return grad_w\r\n\r\n def calculate_gradian_of_cost_B(self,new_a,output,present_a):\r\n grad_b = np.zeros(shape=(self.k, 1))\r\n\r\n if self.number == 3:\r\n for j in range(self.k):\r\n grad_b[j] = 2 * (new_a[j] - output[j]) * sigmoid_derivative(self.z[j])\r\n\r\n else:\r\n\r\n for j in range(self.k):\r\n grad_b[j]=output[j] * sigmoid_derivative(self.z[j])\r\n\r\n\r\n return grad_b\r\n\r\n def calculate_gradian_of_cost_present_a(self,new_a,output,present_a):\r\n grad_a = np.zeros(shape=(self.n, 1))\r\n\r\n if self.number == 3:\r\n for i in range(self.n):\r\n for j in range(self.k):\r\n grad_a[i]+=(2*(new_a[j]-output[j]) * sigmoid_derivative(self.z[j]) * self.weights[j][i])\r\n\r\n\r\n elif self.number == 2:\r\n\r\n for i in range(self.n):\r\n for j in range(self.k):\r\n grad_a[i]+=(output[j] * sigmoid_derivative(self.z[j]) * self.weights[j][i])\r\n\r\n\r\n return grad_a\r\n\r\n\r\ndef sigmoid(x):\r\n return (1/(1 + np.exp(-x)))\r\n\r\n\r\ndef sigmoid_derivative(x):\r\n\r\n return np.multiply(sigmoid(x),(1-sigmoid(x)))\r\n\r\n\r\ndef divide_chunks(l, n):\r\n\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]\r\n\r\ndef show_image(img):\r\n image = img.reshape((28, 28))\r\n plt.imshow(image, 'gray')\r\n\r\ndef read_train_set():\r\n # Reading The Train Set\r\n train_images_file = open('train-images.idx3-ubyte', 'rb')\r\n train_images_file.seek(4)\r\n num_of_train_images = int.from_bytes(train_images_file.read(4), 'big')\r\n train_images_file.seek(16)\r\n train_labels_file = open('train-labels.idx1-ubyte', 'rb')\r\n train_labels_file.seek(8)\r\n\r\n for n in range(num_of_train_images):\r\n image = np.zeros((784, 1))\r\n for i in range(784):\r\n image[i, 0] = 
int.from_bytes(train_images_file.read(1), 'big') / 256\r\n\r\n label_value = int.from_bytes(train_labels_file.read(1), 'big')\r\n label = np.zeros((10, 1))\r\n label[label_value, 0] = 1\r\n\r\n train_set.append((image, label))\r\n\r\ndef read_test_set():\r\n # Reading The Test Set\r\n test_images_file = open('t10k-images.idx3-ubyte', 'rb')\r\n test_images_file.seek(4)\r\n test_labels_file = open('t10k-labels.idx1-ubyte', 'rb')\r\n test_labels_file.seek(8)\r\n num_of_test_images = int.from_bytes(test_images_file.read(4), 'big')\r\n test_images_file.seek(16)\r\n\r\n for n in range(num_of_test_images):\r\n image = np.zeros((784, 1))\r\n for i in range(784):\r\n image[i] = int.from_bytes(test_images_file.read(1), 'big') / 256\r\n\r\n label_value = int.from_bytes(test_labels_file.read(1), 'big')\r\n label = np.zeros((10, 1))\r\n label[label_value, 0] = 1\r\n\r\n test_set.append((image, label))\r\n\r\n\r\ndef feed_forward(first_layer,second_layer,third_layer):\r\n accuracy = 0\r\n for i in range(100):\r\n\r\n input = np.zeros(shape=(784, 1))\r\n input=train_set[i][0]\r\n second_layer_inputs=first_layer.calculate_next_nodes(input)\r\n third_layer_inputs=second_layer.calculate_next_nodes(second_layer_inputs)\r\n fourth_layer_inputs=third_layer.calculate_next_nodes(third_layer_inputs)\r\n j=0\r\n\r\n answer=0\r\n for row in train_set[i][1]:\r\n if row == 1 :\r\n answer=j\r\n j=j+1\r\n\r\n\r\n\r\n if np.nanargmax(fourth_layer_inputs) == answer :\r\n accuracy=accuracy+1\r\n\r\n print(\"Accuracy is : \")\r\n print(accuracy / 100)\r\n\r\ndef calculate_cost(output,result):\r\n cost=pow((output - result), 2)\r\n return sum(cost)\r\n\r\n\r\n\r\nstart=time.time()\r\n\r\ntrain_set = []\r\ntest_set = []\r\nread_train_set()\r\nread_test_set()\r\n\r\n# first step\r\nfirst_layer=layer(784,16,1)\r\nsecond_layer=layer(16,16,2)\r\nthird_layer=layer(16,10,3)\r\nfourth_layer=layer(10,0,4)\r\n\r\n# # part 1\r\n# feed_forward(first_layer,second_layer,third_layer)\r\n\r\n#part 2\r\nlearning_rate=1\r\nnumber_of_epochs=20\r\nbatch_size=10\r\n\r\ntrain_set_limited=[]\r\ncount=0\r\n\r\nfor row in train_set:\r\n train_set_limited.append(row)\r\n count+=1\r\n if count==100:\r\n break\r\n\r\naverage_costs=[]\r\n\r\nfor i in range(number_of_epochs):\r\n print(\"epoch \",i)\r\n\r\n random.shuffle(train_set_limited)\r\n costs = []\r\n\r\n x=list(divide_chunks(train_set_limited,batch_size))\r\n\r\n for batch in x:\r\n\r\n grad_w_layer1 = np.zeros(shape=(16, 784))\r\n grad_b_layer1 = np.zeros(shape=(16, 1))\r\n grad_w_layer2 = np.zeros(shape=(16, 16))\r\n grad_b_layer2 = np.zeros(shape=(16, 1))\r\n grad_w_layer3 = np.zeros(shape=(10, 16))\r\n grad_b_layer3 = np.zeros(shape=(10, 1))\r\n\r\n\r\n for image in batch:\r\n\r\n input = image[0]\r\n\r\n second_layer_inputs = first_layer.calculate_next_nodes(input)\r\n third_layer_inputs = second_layer.calculate_next_nodes(second_layer_inputs)\r\n fourth_layer_inputs = third_layer.calculate_next_nodes(third_layer_inputs)\r\n\r\n costs.append(calculate_cost(image[1],fourth_layer_inputs))\r\n\r\n grad_w_layer3=np.add(grad_w_layer3 , third_layer.calculate_gradian_of_cost_W(fourth_layer_inputs, image[1], third_layer_inputs))\r\n grad_b_layer3=np.add(grad_b_layer3 , third_layer.calculate_gradian_of_cost_B(fourth_layer_inputs, image[1], third_layer_inputs))\r\n\r\n output_grad_a_3=third_layer.calculate_gradian_of_cost_present_a(fourth_layer_inputs, image[1], third_layer_inputs)\r\n grad_w_layer2=np.add(grad_w_layer2 , second_layer.calculate_gradian_of_cost_W(third_layer_inputs, output_grad_a_3, 
second_layer_inputs))\r\n grad_b_layer2=np.add(grad_b_layer2 , second_layer.calculate_gradian_of_cost_B(third_layer_inputs, output_grad_a_3, second_layer_inputs))\r\n output_grad_a_2=second_layer.calculate_gradian_of_cost_present_a(third_layer_inputs, output_grad_a_3, second_layer_inputs)\r\n\r\n grad_w_layer1=np.add(grad_w_layer1 , first_layer.calculate_gradian_of_cost_W(second_layer_inputs, output_grad_a_2,input))\r\n grad_b_layer1=np.add(grad_b_layer1 , first_layer.calculate_gradian_of_cost_B(second_layer_inputs, output_grad_a_2,input))\r\n\r\n\r\n\r\n first_layer.set_weights(np.subtract(first_layer.get_weights(),np.multiply(learning_rate , np.divide(grad_w_layer1,batch_size))))\r\n first_layer.set_bias(np.subtract(first_layer.get_bias(), np.multiply(learning_rate , np.divide(grad_b_layer1 , batch_size))))\r\n\r\n second_layer.set_weights(np.subtract(second_layer.get_weights(), np.multiply(learning_rate , np.divide(grad_w_layer2 , batch_size))))\r\n second_layer.set_bias(np.subtract(second_layer.get_bias(), np.multiply(learning_rate ,np.divide(grad_b_layer2 , batch_size))))\r\n\r\n third_layer.set_weights(np.subtract(third_layer.get_weights(), np.multiply(learning_rate , np.divide(grad_w_layer3 , batch_size))))\r\n third_layer.set_bias(np.subtract(third_layer.get_bias(), np.multiply(learning_rate , np.divide(grad_b_layer3 , batch_size))))\r\n\r\n\r\n average_costs.append((sum(costs)) / len(costs))\r\n\r\n\r\n\r\n\r\nfeed_forward(first_layer,second_layer,third_layer)\r\n\r\n\r\nend=time.time()\r\nprint(\"Time:\",(end-start))\r\n\r\nt = np.arange(0, number_of_epochs, step=1)\r\nx_t = average_costs\r\nplt.plot(t, x_t)\r\nplt.show()" }, { "alpha_fraction": 0.5340442061424255, "alphanum_fraction": 0.5532600283622742, "avg_line_length": 30.275985717773438, "blob_id": "485fb2331e19d44939c8fcd7e6b8f734897447c3", "content_id": "9e54ce081c40a5eab7a5092420df97c8de849a63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9003, "license_type": "no_license", "max_line_length": 120, "num_lines": 279, "path": "/test-part.py", "repo_name": "kianahs/Artificial-nueral-network", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom numpy import random\r\n# import random\r\nimport time\r\n\r\n\r\nclass layer:\r\n k = 0\r\n n = 0\r\n number = 0\r\n z = []\r\n weights = []\r\n bias = []\r\n\r\n def __init__(self, n, k, number):\r\n self.n = n\r\n self.k = k\r\n self.number = number\r\n self.weights = random.normal(size=(self.k, self.n))\r\n # self.z=np.zeros(shape=(self.k, 1))\r\n self.bias = np.zeros(shape=(self.k, 1))\r\n\r\n def get_weights(self):\r\n return self.weights\r\n\r\n def set_weights(self, updated_value):\r\n self.weights = updated_value\r\n\r\n def get_bias(self):\r\n return self.bias\r\n\r\n def set_bias(self, updated_value):\r\n self.bias = updated_value\r\n\r\n def calculate_next_nodes(self, present_a):\r\n\r\n self.z = np.add(np.matmul(self.weights, present_a), self.bias)\r\n result = sigmoid(self.z)\r\n return result\r\n\r\n def calculate_gradian_of_cost_W(self, new_a, output, present_a):\r\n\r\n grad_w = np.zeros(shape=(self.k, self.n))\r\n\r\n if self.number == 3:\r\n\r\n grad_w = np.matmul(2 * (new_a - output) * sigmoid_derivative(self.z), present_a.transpose())\r\n\r\n\r\n else:\r\n\r\n grad_w = np.matmul((output) * sigmoid_derivative(self.z), present_a.transpose())\r\n\r\n return grad_w\r\n\r\n def calculate_gradian_of_cost_B(self, new_a, output, present_a):\r\n grad_b = 
np.zeros(shape=(self.k, 1))\r\n\r\n if self.number == 3:\r\n\r\n grad_b = 2 * (new_a - output) * sigmoid_derivative(self.z)\r\n\r\n else:\r\n\r\n grad_b = output * sigmoid_derivative(self.z)\r\n\r\n return grad_b\r\n\r\n def calculate_gradian_of_cost_present_a(self, new_a, output, present_a):\r\n grad_a = np.zeros(shape=(self.n, 1))\r\n\r\n if self.number == 3:\r\n\r\n grad_a = np.matmul(np.transpose(self.weights), (2 * (new_a - output) * sigmoid_derivative(self.z)))\r\n\r\n\r\n elif self.number == 2:\r\n\r\n grad_a = np.matmul(np.transpose(self.weights), (output * sigmoid_derivative(self.z)))\r\n\r\n return grad_a\r\n\r\n\r\ndef sigmoid(x):\r\n return (1 / (1 + np.exp(-x)))\r\n\r\n\r\ndef sigmoid_derivative(x):\r\n\r\n return np.multiply(sigmoid(x), (1 - sigmoid(x)))\r\n\r\n\r\ndef divide_chunks(l, n):\r\n\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]\r\n\r\n\r\ndef show_image(img):\r\n image = img.reshape((28, 28))\r\n plt.imshow(image, 'gray')\r\n\r\n\r\ndef read_train_set():\r\n # Reading The Train Set\r\n train_images_file = open('train-images.idx3-ubyte', 'rb')\r\n train_images_file.seek(4)\r\n num_of_train_images = int.from_bytes(train_images_file.read(4), 'big')\r\n train_images_file.seek(16)\r\n train_labels_file = open('train-labels.idx1-ubyte', 'rb')\r\n train_labels_file.seek(8)\r\n\r\n for n in range(num_of_train_images):\r\n image = np.zeros((784, 1))\r\n for i in range(784):\r\n image[i, 0] = int.from_bytes(train_images_file.read(1), 'big') / 256\r\n\r\n label_value = int.from_bytes(train_labels_file.read(1), 'big')\r\n label = np.zeros((10, 1))\r\n label[label_value, 0] = 1\r\n\r\n train_set.append((image, label))\r\n\r\n\r\ndef read_test_set():\r\n # Reading The Test Set\r\n test_images_file = open('t10k-images.idx3-ubyte', 'rb')\r\n test_images_file.seek(4)\r\n test_labels_file = open('t10k-labels.idx1-ubyte', 'rb')\r\n test_labels_file.seek(8)\r\n num_of_test_images = int.from_bytes(test_images_file.read(4), 'big')\r\n test_images_file.seek(16)\r\n\r\n # print(num_of_test_images)\r\n for n in range(num_of_test_images):\r\n image = np.zeros((784, 1))\r\n for i in range(784):\r\n image[i] = int.from_bytes(test_images_file.read(1), 'big') / 256\r\n\r\n label_value = int.from_bytes(test_labels_file.read(1), 'big')\r\n label = np.zeros((10, 1))\r\n label[label_value, 0] = 1\r\n\r\n test_set.append((image, label))\r\n\r\n\r\ndef feed_forward(first_layer, second_layer, third_layer):\r\n accuracy = 0\r\n for i in range(len(t_set)):\r\n\r\n input = np.zeros(shape=(784, 1))\r\n input = t_set[i][0]\r\n second_layer_inputs = first_layer.calculate_next_nodes(input)\r\n third_layer_inputs = second_layer.calculate_next_nodes(second_layer_inputs)\r\n fourth_layer_inputs = third_layer.calculate_next_nodes(third_layer_inputs)\r\n j = 0\r\n\r\n answer = 0\r\n for row in t_set[i][1]:\r\n if row == 1:\r\n answer = j\r\n j = j + 1\r\n\r\n\r\n if np.nanargmax(fourth_layer_inputs) == answer:\r\n accuracy = accuracy + 1\r\n\r\n print(\"Accuracy is : \")\r\n print(accuracy / len(t_set))\r\n\r\n\r\ndef calculate_cost(output, result):\r\n cost = pow((output - result), 2)\r\n return sum(cost)\r\n\r\n\r\nstart = time.time()\r\n\r\ntrain_set = []\r\ntest_set = []\r\nt_set=[]\r\nread_train_set()\r\nread_test_set()\r\n# t_set=train_set\r\nt_set=test_set\r\n# first step\r\nfirst_layer = layer(784, 16, 1)\r\nsecond_layer = layer(16, 16, 2)\r\nthird_layer = layer(16, 10, 3)\r\nfourth_layer = layer(10, 0, 4)\r\n\r\n# feed_forward(first_layer,second_layer,third_layer)\r\n\r\n# part 
2\r\nlearning_rate = 1\r\nnumber_of_epochs = 5\r\nbatch_size = 50\r\n\r\n\r\naverage_costs = []\r\n\r\nfor i in range(number_of_epochs):\r\n print(\"epoch \", i)\r\n\r\n random.shuffle(train_set)\r\n costs = []\r\n\r\n x = list(divide_chunks(train_set, batch_size))\r\n\r\n for batch in x:\r\n\r\n grad_w_layer1 = np.zeros(shape=(16, 784))\r\n grad_b_layer1 = np.zeros(shape=(16, 1))\r\n grad_w_layer2 = np.zeros(shape=(16, 16))\r\n grad_b_layer2 = np.zeros(shape=(16, 1))\r\n grad_w_layer3 = np.zeros(shape=(10, 16))\r\n grad_b_layer3 = np.zeros(shape=(10, 1))\r\n\r\n for image in batch:\r\n\r\n input = image[0]\r\n\r\n second_layer_inputs = first_layer.calculate_next_nodes(input)\r\n third_layer_inputs = second_layer.calculate_next_nodes(second_layer_inputs)\r\n fourth_layer_inputs = third_layer.calculate_next_nodes(third_layer_inputs)\r\n\r\n costs.append(calculate_cost(image[1], fourth_layer_inputs))\r\n\r\n grad_w_layer3 = np.add(grad_w_layer3, third_layer.calculate_gradian_of_cost_W(fourth_layer_inputs, image[1],\r\n third_layer_inputs))\r\n grad_b_layer3 = np.add(grad_b_layer3, third_layer.calculate_gradian_of_cost_B(fourth_layer_inputs, image[1],\r\n third_layer_inputs))\r\n\r\n output_grad_a_3 = third_layer.calculate_gradian_of_cost_present_a(fourth_layer_inputs, image[1],\r\n third_layer_inputs)\r\n grad_w_layer2 = np.add(grad_w_layer2,\r\n second_layer.calculate_gradian_of_cost_W(third_layer_inputs, output_grad_a_3,\r\n second_layer_inputs))\r\n grad_b_layer2 = np.add(grad_b_layer2,\r\n second_layer.calculate_gradian_of_cost_B(third_layer_inputs, output_grad_a_3,\r\n second_layer_inputs))\r\n output_grad_a_2 = second_layer.calculate_gradian_of_cost_present_a(third_layer_inputs, output_grad_a_3,\r\n second_layer_inputs)\r\n\r\n grad_w_layer1 = np.add(grad_w_layer1,\r\n first_layer.calculate_gradian_of_cost_W(second_layer_inputs, output_grad_a_2, input))\r\n grad_b_layer1 = np.add(grad_b_layer1,\r\n first_layer.calculate_gradian_of_cost_B(second_layer_inputs, output_grad_a_2, input))\r\n\r\n\r\n first_layer.set_weights(\r\n np.subtract(first_layer.get_weights(), np.multiply(learning_rate, np.divide(grad_w_layer1, batch_size))))\r\n first_layer.set_bias(\r\n np.subtract(first_layer.get_bias(), np.multiply(learning_rate, np.divide(grad_b_layer1, batch_size))))\r\n\r\n second_layer.set_weights(\r\n np.subtract(second_layer.get_weights(), np.multiply(learning_rate, np.divide(grad_w_layer2, batch_size))))\r\n second_layer.set_bias(\r\n np.subtract(second_layer.get_bias(), np.multiply(learning_rate, np.divide(grad_b_layer2, batch_size))))\r\n\r\n third_layer.set_weights(\r\n np.subtract(third_layer.get_weights(), np.multiply(learning_rate, np.divide(grad_w_layer3, batch_size))))\r\n third_layer.set_bias(\r\n np.subtract(third_layer.get_bias(), np.multiply(learning_rate, np.divide(grad_b_layer3, batch_size))))\r\n\r\n\r\n average_costs.append((sum(costs)) / len(costs))\r\n\r\n\r\nfeed_forward(first_layer, second_layer, third_layer)\r\n\r\nend = time.time()\r\nprint(\"Time:\", (end - start))\r\n\r\nt = np.arange(0, number_of_epochs, step=1)\r\nx_t = average_costs\r\nplt.plot(t, x_t)\r\nplt.show()" } ]
2
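The two scripts in this record compute the same backpropagation step in two styles: `part123.py` with explicit nested loops and `test-part.py` with matrix products. The standalone sketch below is not from the repository; it only verifies, on random data and under the quadratic cost used in both scripts, that the loop-form output-layer gradient equals the vectorized outer-product form.

```python
# Standalone check (not from the repo): the loop-based output-layer weight
# gradient in part123.py equals the vectorized form used in test-part.py.
import numpy as np


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


def sigmoid_derivative(x):
    return sigmoid(x) * (1 - sigmoid(x))


rng = np.random.default_rng(0)
z = rng.normal(size=(10, 1))       # output-layer pre-activations
a = sigmoid(z)                     # output-layer activations
y = np.zeros((10, 1))              # one-hot label
y[3] = 1
a_prev = rng.normal(size=(16, 1))  # previous-layer activations

# Loop form, as in part123.py:
# grad_w[j][i] = 2 * (a_j - y_j) * sigmoid'(z_j) * a_prev_i
grad_loop = np.zeros((10, 16))
for j in range(10):
    for i in range(16):
        grad_loop[j, i] = (
            2 * (a[j, 0] - y[j, 0]) * sigmoid_derivative(z[j, 0]) * a_prev[i, 0]
        )

# Vectorized form, as in test-part.py: one outer product via matmul
grad_vec = np.matmul(2 * (a - y) * sigmoid_derivative(z), a_prev.transpose())

assert np.allclose(grad_loop, grad_vec)
```

The two arrays agree because the double loop over (j, i) is exactly the outer product of the per-neuron error term with the previous layer's activations; the matrix form simply batches both loops into a single `matmul`.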
gandalfsaxe/letomes
https://github.com/gandalfsaxe/letomes
27b376b0850c1b08d62e19da2ea777ce3c4a6b9e
5f73a4066fcf69260cb538c105acf898b22e756d
ee0000dbef0850e87ab12cf1c4d5e881383a2325
refs/heads/master
2021-10-10T12:39:22.069254
2019-01-10T21:22:04
2019-01-10T21:22:04
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.417955607175827, "alphanum_fraction": 0.6055817008018494, "avg_line_length": 32.41572952270508, "blob_id": "4e8398b7673f251b97f7cfa95d74a2a3519ea8b2", "content_id": "7fed16f78f628148a517ddd66b30b57ef8453ef3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2974, "license_type": "permissive", "max_line_length": 102, "num_lines": 89, "path": "/docker/examples/_ex10.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "def run_example10():\n \"\"\"\n This example demonstrates the indirect method (cartesian) on a point to point fixed time scenario.\n The boundary conditions are taken from a run of the indirect method.\n \"\"\"\n import pykep as pk\n import pygmo as pg\n import numpy as np\n from matplotlib import pyplot as plt\n from pykep.examples import add_gradient, algo_factory\n\n # 1 - Algorithm\n algo = algo_factory(\"snopt7\")\n\n # 2 - Problem\n udp = add_gradient(pk.trajopt.indirect_pt2pt(\n x0=[44914296854.488266, -145307873786.94177, 1194292.6437741749,\n 31252.149474878544, 9873.214642584162, -317.08718075574404, 1000],\n xf=[-30143999066.728119, -218155987244.44385, -3829753551.2279921,\n 24917.707565772216, -1235.74045124602, -638.05209482866155, 905.47894037275546],\n thrust=0.1,\n isp=3000,\n mu=pk.SUN_MU,\n tof=[616.77087591237546, 616.77087591237546],\n freetime=False,\n alpha=0, # quadratic control\n bound=True),\n with_grad=False\n )\n prob = pg.problem(udp)\n prob.c_tol = [1e-5] * prob.get_nc()\n\n # 3 - Population\n pop = pg.population(prob)\n z = np.hstack(([np.random.uniform(udp.udp_inner.tof[0],\n udp.udp_inner.tof[1])], 10 * np.random.randn(7)))\n pop.push_back(z)\n\n # 4 - Solve the problem (evolve)\n pop = algo.evolve(pop)\n\n # 5 - Continue the solution to mass optimal\n homotopy_path = [0.5, 0.75, 0.9, 1]\n for alpha in homotopy_path:\n z = pop.champion_x\n print(\"alpha: \", alpha)\n udp = add_gradient(pk.trajopt.indirect_pt2pt(\n x0=[44914296854.488266, -145307873786.94177, 1194292.6437741749,\n 31252.149474878544, 9873.214642584162, -317.08718075574404, 1000],\n xf=[-30143999066.728119, -218155987244.44385, -3829753551.2279921,\n 24917.707565772216, -1235.74045124602, -638.05209482866155, 905.47894037275546],\n thrust=0.1,\n isp=3000,\n mu=pk.SUN_MU,\n tof=[616.77087591237546, 616.77087591237546],\n freetime=False,\n alpha=alpha, # quadratic control\n bound=True),\n with_grad=False\n )\n prob = pg.problem(udp)\n prob.c_tol = [1e-5] * prob.get_nc()\n\n # 7 - Solve it\n pop = pg.population(prob)\n pop.push_back(z)\n pop = algo.evolve(pop)\n\n # 8 - Inspect the solution\n print(\"Feasible?:\", prob.feasibility_x(pop.champion_x))\n\n # plot trajectory\n axis = udp.udp_inner.plot_traj(pop.champion_x, quiver=True, mark=\"k\")\n plt.title(\"The trajectory in the heliocentric frame\")\n\n # plot control\n udp.udp_inner.plot_control(pop.champion_x)\n plt.title(\"The control profile (throttle)\")\n\n plt.ion()\n plt.show()\n\n udp.udp_inner.pretty(pop.champion_x)\n\n print(\"\\nDecision vector: \", list(pop.champion_x))\n\n\nif __name__ == \"__main__\":\n run_example10()\n" }, { "alpha_fraction": 0.45997703075408936, "alphanum_fraction": 0.5802374482154846, "avg_line_length": 29.36046600341797, "blob_id": "103094ce102cddadd4e3fd23400d60434cd9aa19", "content_id": "10e97c51795a6be8dc80680f0a4070b647c2e81c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2611, "license_type": "permissive", 
"max_line_length": 106, "num_lines": 86, "path": "/docker/examples/_ex9.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "def run_example9():\n \"\"\"\n This example demonstrates the indirect method (cartesian) on a point to planet variable time scenario.\n The starting conditions are taken from a run of the indirect method.\n \"\"\"\n import pykep as pk\n import pygmo as pg\n import numpy as np\n from matplotlib import pyplot as plt\n from pykep.examples import add_gradient, algo_factory\n\n # 1 - Algorithm\n algo = algo_factory(\"snopt7\")\n\n # 2 - Problem\n udp = add_gradient(pk.trajopt.indirect_pt2pl(\n x0=[44459220055.461708, -145448367557.6174, 1195278.0377499966,\n 31208.214734303529, 9931.5012318647168, -437.07278242521573, 1000],\n t0=1285.6637861007277,\n pf=\"mars\",\n thrust=0.1,\n isp=3000,\n mu=pk.SUN_MU,\n tof=[600, 720],\n alpha=0, # quadratic control\n bound=True),\n with_grad=False\n )\n prob = pg.problem(udp)\n prob.c_tol = [1e-5] * prob.get_nc()\n\n # 3 - Population\n pop = pg.population(prob)\n z = np.hstack(([np.random.uniform(udp.udp_inner.tof[0],\n udp.udp_inner.tof[1])], 10 * np.random.randn(7)))\n pop.push_back(z)\n\n # 4 - Solve the problem (evolve)\n pop = algo.evolve(pop)\n\n homotopy_path = [0.2, 0.4, 0.6, 0.8, 0.9, 0.98, 0.99, 0.995, 1]\n for alpha in homotopy_path:\n z = pop.champion_x\n print(\"alpha is: \", alpha)\n udp = add_gradient(pk.trajopt.indirect_pt2pl(\n x0=[44459220055.461708, -145448367557.6174, 1195278.0377499966,\n 31208.214734303529, 9931.5012318647168, -437.07278242521573, 1000],\n t0=1285.6637861007277,\n pf=\"mars\",\n thrust=0.1,\n isp=3000,\n mu=pk.SUN_MU,\n tof=[600, 720],\n alpha=alpha, # quadratic control\n bound=True),\n with_grad=False\n )\n prob = pg.problem(udp)\n prob.c_tol = [1e-5] * prob.get_nc()\n\n # 7 - Solve it\n pop = pg.population(prob)\n pop.push_back(z)\n pop = algo.evolve(pop)\n\n # 8 - Inspect the solution\n print(\"Feasible?:\", prob.feasibility_x(pop.champion_x))\n\n # plot trajectory\n axis = udp.udp_inner.plot_traj(pop.champion_x, quiver=True, mark='k')\n plt.title(\"The trajectory in the heliocentric frame\")\n\n # plot control\n udp.udp_inner.plot_control(pop.champion_x)\n plt.title(\"The control profile (throttle)\")\n\n plt.ion()\n plt.show()\n\n udp.udp_inner.pretty(pop.champion_x)\n\n print(\"\\nDecision vector: \", list(pop.champion_x))\n\n\nif __name__ == \"__main__\":\n run_example9()\n" }, { "alpha_fraction": 0.5829431414604187, "alphanum_fraction": 0.6066889762878418, "avg_line_length": 29.984455108642578, "blob_id": "673e371267a33551c6c331fffbfb9bc8fd0ef3ce", "content_id": "d91e71dad40f17890296efd32c9c66ee18b87a1d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5980, "license_type": "permissive", "max_line_length": 88, "num_lines": 193, "path": "/code/marscudasim/newephemerides.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nFunctions for getting celestial body positions (ephemerides).\nThese are used as input in equations of motions (analyticals.py) for distances to\nvarious celestial bodies.\n\"\"\"\nimport logging\nimport os\nfrom math import floor, pi, radians\nimport numpy as np\n\nimport pandas as pd\n\nfrom orbsim.r4b_3d.coordinate_system import (\n keep_phi_in_interval_npi_to_pi,\n keep_theta_in_interval_zero_to_pi,\n)\nfrom orbsim.r4b_3d import SUN_PHI, SUN_R, SUN_THETA\n\nlogger = logging.getLogger()\n\n\ndef get_ephemerides(\n relative_path=\"ephemerides/\", 
planets=(\"earth\", \"mars\"), max_year=\"2020\"\n):\n \"\"\" Get table of ephemerides for all specified bodies from 2019-01-01 00:00:00 until\n 01-01 00:00:00 of the end year.\n\n Note that the raw ephemerides files (e.g. `horizons_results_earth_2019-2020.txt`)\n are converted into the pre-processed files (e.g. `earth_2019-2020.csv`) by notebook\n `eph-import.ipynb`, downloaded from URL https://ssd.jpl.nasa.gov/horizons.cgi\n , with settings as described in `ssd-jpl-horizon-settings.md`.\n\n --INPUT--\n relative_path (str): relative path of ephemerides files (to this script)\n planets TUP(str): list of planets to include\n max_year (str): max_year-01-01 will be last date in ephemerides.\n\n --OUTOUT--:\n ephemerides (DICT(\"body\": pandas.df))\n \"\"\"\n\n # Input value checks\n VALID_planets = [\"earth\", \"mars\"]\n VALID_END_YEARS = [\"2020\", \"2039\", \"2262\"]\n\n for planet in planets:\n if planet not in VALID_planets:\n raise ValueError(\n \"Planets contain invalid planets (valid: 'earth' and 'mars')\"\n )\n\n if max_year not in VALID_END_YEARS:\n raise ValueError(\"Invalid max year. Must be '2020', '2039' or '2262'.\")\n\n # Change workdir, construct filenames\n relative_path = os.path.normcase(relative_path)\n path_parts = os.path.realpath(__file__).split(os.path.normcase(\"/\"))[:-1]\n path_parts.append(relative_path)\n abs_path = \"/\".join(path_parts)\n\n os.chdir(abs_path)\n # logging.debug(f\"Current working directory: {os.getcwd()}\")\n\n ephemerides_filename_dict = {}\n\n for planet in planets:\n ephemerides_filename_dict[planet] = f\"{planet}_2019-{max_year}.csv\"\n\n # Read CSV files into dict\n ephemerides = {}\n for body, csv_filename in ephemerides_filename_dict.items():\n imported_body = pd.read_csv(csv_filename, parse_dates=[\"date\"], index_col=\"day\")\n imported_body.insert(\n 0, \"day\", imported_body.index\n ) # 'day' also as first column\n ephemerides[body] = imported_body\n\n # Fix angles.\n \n\n phis = np.array(ephemerides[\"earth\"][\"phi\"])\n\n orbit_num = 0\n for i in range(len(phis)-1):\n phis[i+1] += orbit_num * 360\n if phis[i] > phis[i+1]:\n orbit_num += 1\n phis[i+1] += 360\n #for i in range(len(phis)):\n #phis[i] = radians(phis[i])\n \n ephemerides[\"earth\"][\"phi\"] = phis\n #print(phis[630:633])\n return ephemerides\n\n\ndef get_ephemerides_on_day(ephemerides, day_index=0):\n \"\"\"\n Get ephemerides of all bodies in input for specific input day (continuous).\n --INPUT--\n ephemerides (DICT(\"body\": pandas.df)): Dict of ephemerides, from get_ephemerides()\n date (int or float): Days since 2019-01-01 00:00:00\n \"\"\"\n\n # Check for day out of bounds with respect to the imported ephemerides\n max_day_index = len(ephemerides[\"earth\"]) - 2\n\n if day_index < -1 or day_index > max_day_index: # +2 due to starting on day=-1\n raise ValueError(f\"Day out of bounds, must be in interval [-1,{max_day_index}]\")\n\n day = day_index + 1 # Since day starts at -1, only used for velocity estimation\n\n day_lower = floor(day)\n day_upper = day_lower + 1\n day_increment = day % 1\n\n eph_on_day = {}\n\n for body, eph in ephemerides.items():\n\n start_position_df = eph.iloc[[day_lower]]\n end_position_df = eph.iloc[[day_upper]]\n start_position_series = start_position_df.iloc[0]\n end_position_series = end_position_df.iloc[0]\n\n diff_position_series = end_position_series - start_position_series\n\n interpolated_position = (\n start_position_series + day_increment * diff_position_series\n )\n\n eph_on_day[body] = interpolated_position\n\n sun = 
eph_on_day[\"earth\"].copy()\n sun[\"r\"] = SUN_R\n sun[\"theta\"] = SUN_THETA\n sun[\"phi\"] = SUN_PHI\n sun[\"x\"] = 0\n sun[\"y\"] = 0\n sun[\"z\"] = 0\n\n eph_on_day[\"sun\"] = sun\n\n return eph_on_day\n\n\ndef get_coordinates_on_day_rad(ephemerides_on_day):\n \"\"\"Take in ephemerides object in form af a Pandas.Series and extract just the\n coordinates, both cartesian and spherical.\n\n Arguments:\n ephemerides_on_day {Pandas.Series} -- Ephemerides table output from function\n ephemerides_on_day().\n\n Returns:\n Tuple(List(float)) -- Coordinates in both cartesian and spherical format in list\n [r,theta,phi,x,y,z] in a coordinate tuple\n (sun, earth, mars)\n \"\"\"\n\n R_ks = []\n theta_ks = []\n phi_ks = []\n\n for body in [\"sun\", \"earth\", \"mars\"]:\n R_ks.append(ephemerides_on_day[body][\"r\"])\n theta_ks.append(\n keep_theta_in_interval_zero_to_pi(\n ephemerides_on_day[body][\"theta\"] * pi / 180\n )\n )\n phi_ks.append(\n keep_phi_in_interval_npi_to_pi(ephemerides_on_day[body][\"phi\"] * pi / 180)\n )\n\n return R_ks, theta_ks, phi_ks\n\n\n# if __name__ == \"__main__\":\n\n# from pprint import pprint\n\n# test = get_coordinates_on_day_rad(get_ephemerides_on_day(get_ephemerides(), 0))\n\n# pprint(test)\n\n# pass\n\n# # # test_date = 124.26\n# # test_day = 0\n# # test_eph_on_day = get_ephemerides_on_day(test_eph, test_day)\n\n# # logging.info(f\"Ephemerides on day {test_day}:\\n {test_eph_on_day}\")\n" }, { "alpha_fraction": 0.5278481245040894, "alphanum_fraction": 0.558589518070221, "avg_line_length": 29.711111068725586, "blob_id": "0db3f07b14ed1bb7eb035a88ef343a486b86c28a", "content_id": "f925528fc005eef881c0b7001f762cd14475828a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5530, "license_type": "permissive", "max_line_length": 88, "num_lines": 180, "path": "/code/pyscripts/comparative.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\nfrom matplotlib.colors import Normalize\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nimport pandas as pd\n\nimport numpy as np\nfrom math import pi, log\nfrom scipy.stats import rankdata\n\n# === setup problem space, either real or Karpathy toy problem for validation ===\npspace = np.loadtxt(\"golf_course_zoom_s1024.txt\")\nsz = 1024\nX, Y = np.meshgrid(np.linspace(-1, 1, sz), np.linspace(-1, 1, sz))\nmux, muy, sigma = 0.3, -0.3, 4\nG1 = np.exp(-((X - mux) ** 2 + (Y - muy) ** 2) / 2.0 * sigma ** 2)\nmux, muy, sigma = -0.3, 0.3, 2\nG2 = np.exp(-((X - mux) ** 2 + (Y - muy) ** 2) / 2.0 * sigma ** 2)\nmux, muy, sigma = 0.6, 0.6, 2\nG3 = np.exp(-((X - mux) ** 2 + (Y - muy) ** 2) / 2.0 * sigma ** 2)\nmux, muy, sigma = -0.4, -0.2, 3\nG4 = np.exp(-((X - mux) ** 2 + (Y - muy) ** 2) / 2.0 * sigma ** 2)\nG = G1 + G2 - G3 - G4\n\n# uncomment this line if you want smooth toy-problem\n# pspace = G\ndims = pspace.shape\nprint(dims)\n\nstartpsi = [275,600]\nnp.random.seed(0)\n# startpsi = [np.random.randint(low=0, high=dim) for dim in dims]\n# sigma = 20\nalpha = 0.2\nepsi_size = 50\neval_budget = 2000\n\n# +++++++++++++++++++++ Evolution Strategies ++++++++++++++++++++++++++++++\ndef evolve(startpsi, eval_budget, timeline):\n psi = startpsi\n sigma = 20\n best_score = 100.0\n for i in range(int(eval_budget / epsi_size)):\n noise = np.random.randn(epsi_size, 2)\n halfway = int(noise.shape[0]/2)\n for j in range(halfway):\n noise[halfway+j] = -1*noise[j]\n x, y = psi\n x, y = [min(dims[0] - 1, 
max(0, x)), min(dims[1] - 1, max(0, y))]\n epsi = [x, y] + sigma * noise\n epsi = [\n [min(dims[0] - 1, max(0, x)), min(dims[1] - 1, max(0, y))] for x, y in epsi\n ]\n\n score = pspace[int(x)][int(y)]\n if score < best_score:\n best_score = score\n\n R = np.array([-pspace[int(x)][int(y)] for x, y in epsi])\n R = np.array(rankdata(R), dtype=float)\n Rmean = R.mean()\n for idx, v in enumerate(R):\n if v<Rmean:\n R[idx]=0\n # print(R)\n R /= sum(R)\n step_norm = np.dot(R, noise)\n step = alpha * sigma**2 * step_norm\n sigma -= 0.4\n sigma = max(0,sigma)\n psi += step\n timeline.append(\n {\n \"score\": score,\n \"best_score\": best_score,\n \"coords\": [x, y],\n \"step\": step,\n \"epsi\": epsi,\n }\n )\n\n\nes_timeline = []\nevolve(startpsi, eval_budget, es_timeline)\n\n# ===================== RANDOM GUESSING ==================================\nbest_run = {\"score\": 100.0, \"coords\": [0, 0]}\nrg_timeline = []\nx_timeline = []\ny_timeline = []\nfor _ in range(eval_budget):\n psi = [np.random.randint(low=0, high=dim) for dim in dims]\n score = pspace[psi[0]][psi[1]]\n\n if score < best_run[\"score\"]:\n best_run[\"score\"] = score\n best_run[\"coords\"] = psi\n\n rg_timeline.append(\n {\"best_score\": best_run[\"score\"], \"score\": score, \"coords\": best_run[\"coords\"]}\n )\n x_timeline.append(psi[0])\n y_timeline.append(psi[1])\n\n# ******************** PLOTTING ****************************************\n# ======== establish figs =================\nfig = plt.figure()\naxrand = fig.add_subplot(\"221\")\naxevos = fig.add_subplot(\"222\")\nax_rg_pspace = fig.add_subplot(\"223\")\nax_es_pspace = fig.add_subplot(\"224\")\n\n# ========= compute and plot convergence curves ==============\ncummean = np.array(\n pd.Series([part[\"score\"] for part in rg_timeline]).expanding().mean()\n)\naxrand.plot([part[\"best_score\"] for part in rg_timeline], color=\"black\")\naxrand.plot(cummean, \"b-\")\naxrand.plot([part[\"score\"] for part in rg_timeline], color=\"red\", alpha=0.4)\n\ncummean = np.array(\n pd.Series([part[\"score\"] for part in es_timeline]).expanding().mean()\n)\naxevos.plot([part[\"score\"] for part in es_timeline], color=\"red\", alpha=0.4)\naxevos.plot(cummean, \"b-\")\naxevos.plot([part[\"best_score\"] for part in es_timeline], color=\"black\")\n\n# ============= plot problem space bg images ====\ncmap = plt.cm.viridis\ncolors = Normalize(min(pspace.flatten()), max(pspace.flatten()))(pspace)\ncolors = cmap(colors)\nim = ax_rg_pspace.imshow(\n colors,\n vmin=min(pspace.flatten()),\n vmax=max(pspace.flatten()),\n extent=[0, dims[0], 0, dims[1]],\n interpolation=\"none\",\n origin='lower'\n)\nim2 = ax_es_pspace.imshow(\n colors,\n vmin=min(pspace.flatten()),\n vmax=max(pspace.flatten()),\n extent=[0, dims[0], 0, dims[1]],\n interpolation=\"none\",\n origin='lower'\n)\n\n# ========= colorbars =========================\nbestcoords = np.array([best[\"coords\"] for best in rg_timeline]).T\nax_rg_pspace.scatter(x_timeline, y_timeline, color=\"black\", s=0.2)\nax_rg_pspace.scatter(bestcoords[0], bestcoords[1], color=\"lime\", s=3)\n\nplt.colorbar(mappable=im2)\nepsis = np.array([part[\"epsi\"] for part in es_timeline])\ncoords = np.array([part[\"coords\"] for part in es_timeline])\nsteps = np.array([part[\"step\"] for part in es_timeline])\n\n# ES timeline plot\n# fig2 = plt.figure()\nfor idx, epsi in enumerate(epsis):\n ax_es_pspace.scatter(np.array(epsi).T[0], np.array(epsi).T[1], color=\"black\", s=0.2)\n\nax_es_pspace.plot(coords.T[0], coords.T[1], 
\"wo-\")\nax_es_pspace.scatter(coords.T[0], coords.T[1], color=\"black\", s=6)\narrow = [coords[-1], steps[-1]]\nax_es_pspace.arrow(\n arrow[0][0],\n arrow[0][1],\n arrow[1][0],\n arrow[1][1],\n head_width=3,\n head_length=5,\n fc=\"w\",\n ec=\"w\",\n)\n\n\nplt.show()\n\n" }, { "alpha_fraction": 0.5034666657447815, "alphanum_fraction": 0.5414666533470154, "avg_line_length": 35.9458122253418, "blob_id": "e84d741cb282fd9966228de7206c0e0f0e460908", "content_id": "d53de4981bc238439ce7135bd6d72cce8bad37e7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7500, "license_type": "permissive", "max_line_length": 172, "num_lines": 203, "path": "/docker/examples/_ex3.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "import pykep as pk\n\n\nclass mga_lt_EVMe(object):\n\n \"\"\"\n This constructs a pagmo udp that represents a low-thrust transfer between Earth and Mercury with a Venus\n fly-by. The decision vector contains\n [\n t0,\n T1, mf1, Vxi1, Vyi1, Vzi1, Vxf1, Vyf1, Vzf1,\n T2, mf2, Vxi2, Vyi2, Vzi2, Vxf2, Vyf2, Vzf2,\n [throttles1], [throttles2]\n ]\n in the following units: [mjd2000, days, kg, m/s,m/s,m/s, [non-dimensional]]\n \"\"\"\n\n def __init__(self, mass=2000, Tmax=0.5, Isp=3500, Vinf_dep=3, Vinf_arr=2, nseg1=5, nseg2=20):\n # We define some data members (we use the double underscore to\n # indicate they are private)\n self.__earth = pk.planet.jpl_lp('earth')\n self.__venus = pk.planet.jpl_lp('venus')\n self.__mercury = pk.planet.jpl_lp('mercury')\n self.__sc = pk.sims_flanagan.spacecraft(mass, Tmax, Isp)\n self.__Vinf_dep = Vinf_dep * 1000\n self.__Vinf_arr = Vinf_arr * 1000\n self.__leg1 = pk.sims_flanagan.leg()\n self.__leg2 = pk.sims_flanagan.leg()\n self.__leg1.set_mu(pk.SUN_MU)\n self.__leg1.set_spacecraft(self.__sc)\n self.__leg2.set_mu(pk.SUN_MU)\n self.__leg2.set_spacecraft(self.__sc)\n self.__nseg1 = nseg1\n self.__nseg2 = nseg2\n self.__mass = mass\n\n def get_nec(self):\n return 15\n\n def get_nic(self):\n return self.__nseg1 + self.__nseg2 + 3\n\n def get_bounds(self):\n lb = [3000, 100, self.__mass / 2] + [-self.__Vinf_dep] * 3 + [-6000] * 3 + [200, self.__mass /\n 9] + [-6000] * 3 + [-self.__Vinf_arr] * 3 + [-1, -1, -1] * (self.__nseg1 + self.__nseg2)\n ub = [4000, 1000, self.__mass] + [self.__Vinf_dep] * 3 + [6000] * 3 + [2000, self.__mass] + \\\n [6000] * 3 + [self.__Vinf_arr] * 3 + \\\n [1, 1, 1] * (self.__nseg1 + self.__nseg2)\n return (lb, ub)\n\n # This is the objective function\n def fitness(self, x):\n from pykep import epoch, AU, EARTH_VELOCITY, fb_con\n from pykep.sims_flanagan import leg, sc_state\n from numpy.linalg import norm\n from math import sqrt, asin, acos\n\n retval = [-x[10]]\n\n # Ephemerides\n t_E = epoch(x[0])\n t_V = epoch(x[0] + x[1])\n t_M = epoch(x[0] + x[1] + x[9])\n rE, vE = self.__earth.eph(t_E)\n rV, vV = self.__venus.eph(t_V)\n rM, vM = self.__mercury.eph(t_M)\n\n # First Leg\n v = [a + b for a, b in zip(vE, x[3:6])]\n x0 = sc_state(rE, v, self.__sc.mass)\n v = [a + b for a, b in zip(vV, x[6:9])]\n xe = sc_state(rV, v, x[2])\n self.__leg1.set(\n t_E, x0, x[-3 * (self.__nseg1 + self.__nseg2):-self.__nseg2 * 3], t_V, xe)\n\n # Second leg\n v = [a + b for a, b in zip(vV, x[11:14])]\n x0 = sc_state(rV, v, x[2])\n v = [a + b for a, b in zip(vM, x[14:17])]\n xe = sc_state(rM, v, x[10])\n self.__leg2.set(t_E, x0, x[(-3 * self.__nseg2):], t_V, xe)\n\n # Defining the constraints\n # departure\n v_dep_con = (x[3] * x[3] + x[4] * x[4] + x[5] * x[5] -\n 
self.__Vinf_dep * self.__Vinf_dep) / (EARTH_VELOCITY * EARTH_VELOCITY)\n # arrival\n v_arr_con = (x[14] * x[14] + x[15] * x[15] + x[16] * x[16] -\n self.__Vinf_arr * self.__Vinf_arr) / (EARTH_VELOCITY * EARTH_VELOCITY)\n # fly-by at Venus\n DV_eq, alpha_ineq = fb_con(x[6:9], x[11:14], self.__venus)\n\n # Assembling the constraints\n constraints = list(self.__leg1.mismatch_constraints() + self.__leg2.mismatch_constraints()) + [DV_eq] + list(\n self.__leg1.throttles_constraints() + self.__leg2.throttles_constraints()) + [v_dep_con] + [v_arr_con] + [alpha_ineq]\n\n # We then scale all constraints to non-dimensional values\n # leg 1\n constraints[0] /= AU\n constraints[1] /= AU\n constraints[2] /= AU\n constraints[3] /= EARTH_VELOCITY\n constraints[4] /= EARTH_VELOCITY\n constraints[5] /= EARTH_VELOCITY\n constraints[6] /= self.__sc.mass\n # leg 2\n constraints[7] /= AU\n constraints[8] /= AU\n constraints[9] /= AU\n constraints[10] /= EARTH_VELOCITY\n constraints[11] /= EARTH_VELOCITY\n constraints[12] /= EARTH_VELOCITY\n constraints[13] /= self.__sc.mass\n # fly-by at Venus\n constraints[14] /= (EARTH_VELOCITY * EARTH_VELOCITY)\n\n return retval + constraints\n\n # And this helps to visualize the trajectory\n def plot(self, x):\n import matplotlib as mpl\n from mpl_toolkits.mplot3d import Axes3D\n import matplotlib.pyplot as plt\n from pykep import epoch, AU\n from pykep.sims_flanagan import sc_state\n from pykep.orbit_plots import plot_planet, plot_sf_leg\n\n t_E = epoch(x[0])\n t_V = epoch(x[0] + x[1])\n t_M = epoch(x[0] + x[1] + x[9])\n rE, vE = self.__earth.eph(t_E)\n rV, vV = self.__venus.eph(t_V)\n rM, vM = self.__mercury.eph(t_M)\n\n # First Leg\n v = [a + b for a, b in zip(vE, x[3:6])]\n x0 = sc_state(rE, v, self.__sc.mass)\n v = [a + b for a, b in zip(vV, x[6:9])]\n xe = sc_state(rV, v, x[2])\n self.__leg1.set(\n t_E, x0, x[-3 * (self.__nseg1 + self.__nseg2):-self.__nseg2 * 3], t_V, xe)\n\n # Second leg\n v = [a + b for a, b in zip(vV, x[11:14])]\n x0 = sc_state(rV, v, x[2])\n v = [a + b for a, b in zip(vM, x[14:17])]\n xe = sc_state(rM, v, x[10])\n self.__leg2.set(t_E, x0, x[(-3 * self.__nseg2):], t_V, xe)\n\n fig = plt.figure()\n axis = fig.gca(projection='3d')\n\n # The Sun\n axis.scatter([0], [0], [0], color='y')\n # The legs\n plot_sf_leg(self.__leg1, units=AU, N=10, ax=axis)\n plot_sf_leg(self.__leg2, units=AU, N=10, ax=axis)\n # The planets\n plot_planet(\n self.__earth, t_E, units=AU, legend=True, color=(0.7, 0.7, 1), ax=axis)\n plot_planet(\n self.__venus, t_V, units=AU, legend=True, color=(0.7, 0.7, 1), ax=axis)\n plot_planet(\n self.__mercury, t_M, units=AU, legend=True, color=(0.7, 0.7, 1), ax=axis)\n plt.show()\n\n\"\"\"\nThis example constructs, using pygmo for optimization, an interplanetary low-thrust optimization\nproblem that can then be solved using one of the available pygmo solvers. The problem is a non-linear constrained\nproblem that uses the Sims-Flanagan transcription to model the low-thrust trajectory. pykep plotting capabilities\nare also demonstrated via the plot method. 
The interplanetary mission modelled is an LT-MGA Earth-Venus-Mercury mission.\n\n\"\"\"\n\n\ndef run_example3():\n import pygmo as pg\n from pykep.examples import add_gradient, algo_factory\n\n # problem\n udp = add_gradient(mga_lt_EVMe(), with_grad=False)\n prob = pg.problem(udp)\n prob.c_tol = [1e-5] * prob.get_nc()\n\n # algorithm\n uda = algo_factory(\"snopt7\", False)\n uda2 = pg.mbh(uda, 5, 0.05)\n algo = pg.algorithm(uda2)\n algo.set_verbosity(1)\n\n # 3 - Population\n pop = pg.population(prob, 1)\n\n # 4 - Solve the problem (evolve)\n print(\"Running Monotonic Basin Hopping ....\")\n pop = algo.evolve(pop)\n\n print(\"Is the solution found a feasible trajectory? \" +\n str(prob.feasibility_x(pop.champion_x)))\n udp.udp_inner.plot(pop.champion_x)\n\nif __name__ == \"__main__\":\n run_example3()\n" }, { "alpha_fraction": 0.6192371249198914, "alphanum_fraction": 0.6325041651725769, "avg_line_length": 32.87640380859375, "blob_id": "61d7f8a9d93d8cb189172688fe66b70b5b3b242b", "content_id": "c39becabeac4d8720e108bfe9b94c5f300c086b4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3015, "license_type": "permissive", "max_line_length": 104, "num_lines": 89, "path": "/code/tests/r4b_3d/test_integrators.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nPytest module of corresponding python file without \"test_\" in the name.\n\nTo test of another function, simply:\n1. Add tests to corresponding Mathematica script (run it to export JSON)\n2. Import function in this module.\n3. Copy the code block below (pytest decorator + test1) and\n 3.1 Increment function name test[i+1]\n 3.2 Put in function name string in:\n 3.2.1 First argument to process_test_data()\n 3.2.2 Docstring to test function\n\nThat's it!\n\"\"\"\nimport json\nimport os\n\nimport pytest\n\nfrom orbsim.r4b_3d.integrators import euler_step_symplectic # pylint: disable=W0611\n\nfrom orbsim.r4b_3d.ephemerides import get_ephemerides, get_ephemerides_on_day\n\n\ntestdata_folder_path = os.path.dirname(os.path.realpath(__file__))\ntestdata_filename = os.path.basename(__file__).split(\".\")[0] + \".json\"\ntestdata_file_path = testdata_folder_path + \"/\" + testdata_filename\n\nwith open(testdata_file_path) as file:\n test_data = json.load(file)\n\n\ndef process_test_data_symplectic_euler(function_name, input_type=\"list\"):\n \"\"\"\n Reformats JSON data to format suitable for @pytest.mark.parametrize decorator\n For example see https://docs.pytest.org/en/latest/parametrize.html.\n\n Arguments:\n function_name {str} -- Name of function to be tested.\n\n Keyword Arguments:\n output_type {str} -- Data in JSON will either be a single int/float or a list.\n If 'tuple' is passed in, the list will be converted to a\n tuple instead of a list. (default: {\"unchanged\"}).\n\n Returns:\n List[Tuple(Str, Any)] -- List of 2-tuples of function calls with input as\n strings and the expected output.\n \"\"\"\n\n tests = test_data[function_name]\n\n function_tests = []\n for arg, output in tests:\n if input_type == \"scalar\":\n arg = tuple(arg)\n arg_str = str(arg)[1:-1]\n if not isinstance(output, str):\n # Make tuple list (\"function(input)\", output)\n function_tests.append((f\"{function_name}({arg_str})\", output))\n else:\n # Make pytest.param marked with xfail, e.g. 
pytest.param(\"6*9\", 42, marks=pytest.mark.xfail)\n function_tests.append(\n pytest.param(\n f\"{function_name}({arg_str})\",\n None,\n marks=pytest.mark.xfail(raises=ValueError),\n )\n )\n\n return function_tests\n\n\[email protected](\n \"test_input, expected\", process_test_data_symplectic_euler(\"euler_step_symplectic\")\n)\ndef test1(test_input, expected):\n \"\"\"Test euler_step_symplectic\"\"\"\n\n # assert eval(test_input) == expected # pylint: disable=W0123\n\n for i, test_input_part in enumerate(eval(test_input)): # pylint: disable=W0123\n assert list(test_input_part) == pytest.approx(expected[i])\n # assert test_input_part == expected[i]\n\n\nif __name__ == \"__main__\":\n\n process_test_data_symplectic_euler(\"euler_step_symplectic\")\n" }, { "alpha_fraction": 0.6118518710136414, "alphanum_fraction": 0.6207407116889954, "avg_line_length": 22.241378784179688, "blob_id": "7cb4c97ce6e6e889bc701eb2305b644b135920e5", "content_id": "4602d58c142e7644a0cdaf315459e8fcd647e4cb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 675, "license_type": "permissive", "max_line_length": 77, "num_lines": 29, "path": "/code/marscudasim/Makefile", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "TARGET\t= libcudasim.so\nLIBSRCS\t= \nLIBOBJS\t= cudasim.o cppsim.o initial_conditions.o\n\nOPT\t= -g -O3\nPIC = -fpic\nOMP = -fopenmp\nXPIC = -Xcompiler -fpic\nXOPT = -Xptxas=-v -lineinfo #-G # only use -G for debugging / profiler\nXARCH = -arch=sm_70\n\nCXX\t= nvcc\nCXXFLAGS = --compiler-options \"$(OPT) $(PIC) $(OMP)\" $(XARCH) $(XOPT) $(XPIC)\n\nCUDA_PATH ?= /appl/cuda/10.0\nINCLUDES = -I$(CUDA_PATH)/include -I$(CUDA_PATH)/samples/common/inc\n\nSOFLAGS = -shared\nXLIBS\t = \n\n$(TARGET): $(LIBOBJS)\n\t$(CXX) -o $@ $(CXXFLAGS) $(SOFLAGS) $(INCLUDES) $^ $(XLIBS)\n\n.SUFFIXES: .cu\n.cu.o:\n\t$(CXX) -o $*.o -c $*.cu $(CXXFLAGS) $(SOFLAGS) $(INCLUDES) -dc\n\nclean:\n\t/bin/rm -f $(TARGET) $(LIBOBJS) \n" }, { "alpha_fraction": 0.5351317524909973, "alphanum_fraction": 0.5788896083831787, "avg_line_length": 32.735450744628906, "blob_id": "c5ce4be4a38a822f9c0bda28475cf85fc1784cf1", "content_id": "f78345e2259fff752665bca0e59b6aba5606208f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6376, "license_type": "permissive", "max_line_length": 89, "num_lines": 189, "path": "/code/cudasim/cuda_rocketry.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "from orbsim.r3b_2d.simulators import run_sim\nfrom multiprocessing import Pool\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# import pygmo as pg\n# from pygmo import algorithm\nimport os\nimport sys\nfrom orbsim.r3b_2d.simulators import run_sim\n\n# from orbsim.plotting import orbitplot2d, orbitplot_non_inertial\nfrom orbsim.r3b_2d.analyticals import (\n ensure_bounds,\n random_disjoint_intervals,\n collapse_intervals,\n)\nimport time\nfrom numba import jit, njit\nimport math\nfrom math import pi\nfrom scipy.stats import rankdata\n\n# from ctypes import cdll\nfrom ctypes import *\n\ncudasim = cdll.LoadLibrary(\"./libcudasim.so\")\n\npi8 = pi / 8\npi4 = pi / 4\npi2 = pi / 2\ntau = 2 * pi\n\n\ndef evolve(psis, bounds, nIterations, nIndividuals, nJitter, maxDuration, maxSteps):\n init_sigma = 0.2 # spread\n init_alpha = 0.3 # learningrate\n sigma, alpha = init_sigma, init_alpha\n # sigma = np.ones(nIndividuals) * init_sigma\n # alpha = np.ones(nIndividuals) * init_alpha\n 
allscores=[]\n winners = []\n intermediate_winners = []\n bounds_list = bounds.values()\n np.random.seed(0)\n for _ in range(nIterations):\n\n \"\"\"\n make list of all paths to integrate\n \"\"\"\n jitter = []\n for _ in range(nIndividuals):\n noise = np.random.randn(nJitter, 3)\n halfway = int(noise.shape[0]/2)\n for i in range(halfway):\n noise[halfway+i] = -1*noise[i]\n jitter.append(noise)\n jitter = np.array(jitter)\n jitter = np.array([sigma * jitt for idx, jitt in enumerate(jitter)])\n jitter = jitter.reshape(nJitter, nIndividuals, 3)\n jitter[0] *= 0 # Make sure all set individuals are evaluated without jitter\n points = jitter + psis\n points = points.reshape(nIndividuals * nJitter, 3)\n for i, pt in enumerate(points):\n points[i] = ensure_bounds(pt, bounds_list)\n points = points.reshape(nJitter, nIndividuals, 3)\n successes = np.zeros(nIndividuals * nJitter, dtype=bool)\n scores = np.zeros(nIndividuals * nJitter)\n\n \"\"\"\n cudasim.integrate\n \n Input:\n nIndividuals Number of individuals (size of population)\n nJitter Number of random jitter points\n maxSteps Maximum number of steps of integration algorithm\n maxDuration Maximum t (in days) of integration algorithm\n inArray 1D input array of doubles; size is 3 x nIndividuals \n\n Output:\n successArray 1D ouput array of bools; size is 1 x nIndividuals\n scoreArray 1D ouput array of doubles; size is 1 x nIndividuals\n \n \"\"\"\n cudasim.integrate.restype = None\n cudasim.integrate.argtypes = [\n c_int,\n c_int,\n c_double,\n c_int,\n POINTER(c_double),\n POINTER(c_bool),\n POINTER(c_double),\n ]\n inArray = points.ctypes.data_as(POINTER(c_double))\n successArray = successes.ctypes.data_as(POINTER(c_bool))\n scoreArray = scores.ctypes.data_as(POINTER(c_double))\n cudasim.integrate(\n nIndividuals,\n nJitter,\n maxDuration,\n int(maxSteps),\n inArray,\n successArray,\n scoreArray,\n )\n\n print(\"successes=\", successes.sum())\n points = points.reshape(nIndividuals * nJitter, 3)\n for i, _ in enumerate(scores):\n scores[i] += points[i][2] # add burn dv\n if not successes[i]:\n scores[i] += 1\n scores[i] *= 10\n\n \"\"\"transform scores -- ranking\"\"\"\n scores = scores.reshape(nIndividuals, nJitter)\n ranked_scores = np.array(\n [rankdata(-1 * sig_eps, method=\"ordinal\") for sig_eps in scores]\n )\n for rscores in ranked_scores:\n rsum = rscores.sum()\n rscores = [\n rscore / rsum for rscore in rscores\n ] # make scores sum to 1\n # ranked_scores = -1 * ranked_scores\n\n steps = np.zeros([nIndividuals, 3])\n jitter = jitter.transpose(1, 0, 2)\n steps = np.array(\n [\n np.dot(ranked_scores[idx], jitter[idx]) * sigma**2 * alpha\n for idx in range(len(steps))\n ]\n )\n\n \"\"\"report winners\"\"\"\n points = points.reshape(nIndividuals, nJitter, 3)\n scores = scores.reshape(nIndividuals, nJitter)\n successes = successes.reshape(nIndividuals, nJitter)\n for idx, psi in enumerate(psis):\n allscores.append(f\"{scores[idx][0]} \")\n if successes[idx][0]:\n winners.append(str([idx, psi, scores[idx][0]]) + \"\\n\")\n for jdx, succ in enumerate(\n successes[idx][1:]\n ): # all but the first value, since the first value is the individual itself\n if succ:\n intermediate_winners.append(\n \" -- \"\n + str([idx, points[idx][jdx + 1], scores[idx][jdx + 1]])\n + \"\\n\"\n )\n allscores.append(\"\\n\")\n psis += steps\n\n scoresfile = open('cuda_moon_scores.txt', 'w')\n scoresfile.writelines(allscores)\n scoresfile.close()\n logfile = open(f\"cudaES.log\", \"w\")\n logfile.writelines(winners)\n logfile.writelines(intermediate_winners)\n 
logfile.close()\n\n\ndef initialize_psis(n, bounds):\n psis = [[random_disjoint_intervals(bound) for bound in bounds] for _ in range(n)]\n return psis\n\n\nif __name__ == \"__main__\":\n nIterations = 300\n nIndividuals = 1024\n nJitter = 32\n maxDuration = 100\n maxSteps = 1e7\n bounds = {\n \"pos\": np.array([[0, 1 * tau]]),\n \"ang\": np.array([[0, 1 * tau / 16], [tau / 2 - tau / 16, tau / 2]]),\n \"burn\": np.array([[3.1, 3.15]]),\n }\n psis = initialize_psis(nIndividuals, bounds.values())\n # pop.set_x(0, [-2.277654673852600, 0.047996554429844, 3.810000000000000])\n # pop.set_x(1, [-0.138042744751570, -0.144259374836607, 3.127288444444444])\n # pop.set_x(2, [-2.086814820119193, -0.000122173047640, 3.111181716545691])\n # print(pop)\n psis[0] = [4.005_530_633_326_986, 0.047_996_554_429_844, 3.810_000_000_000_000]\n evolve(psis, bounds, nIterations, nIndividuals, nJitter, maxDuration, maxSteps)\n" }, { "alpha_fraction": 0.586254894733429, "alphanum_fraction": 0.6037840843200684, "avg_line_length": 28.950000762939453, "blob_id": "d2c173c7a7104b675ef0bd5500b1c040f062c601", "content_id": "ac459773da03fca596b48a60eeea128bed625914", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3594, "license_type": "permissive", "max_line_length": 87, "num_lines": 120, "path": "/code/orbsim/r4b_3d/__init__.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nConstants specific to R4B-3D (Restricted 4-Body Problem in 3 Dimensions)\n\nUnless otherwise noted, all units will be in:\n- Mass: kg\n- Length: km\n- Time: days TODO: Change to seconds due to better fit with typical time step size\n\nVariable name conventions:\n- non_dim: dimensionless (nondimensionalized)\n\"\"\"\nimport json\nimport os\nfrom math import pi, sqrt\n\nfrom orbsim import (\n G,\n DAY,\n EARTH_MASS,\n EARTH_RADIUS,\n LUNAR_MASS,\n LUNAR_RADIUS,\n a_EARTH,\n T_EARTH,\n SUN_MU,\n EARTH_MU,\n MARS_MU,\n)\n\n############### SIMULATION CONSTANTS ###############\n\n# class: Planets (planets.py)\nEARTH_ALTITUDE = 160.0 # km\nLUNAR_ALTITUDE = 100.0 # km\nORBITAL_TOLERANCE = 10 # km\n\n# function: symplectic (integrators.py)\nDAY0 = 0 # start day of simulation, day=0 is 2019-01-01 00:00:00\nh_DEFAULT = 1e-6 # dimless time\nh_MIN_DEFAULT = 1e-10 # dimless time\nSTEP_ERROR_TOLERANCE = 1e-9 # dimless time\n\n\n############### CHARACTERISTIC UNITS ###############\n\nUNIT_LENGTH = a_EARTH # km/AU\n# (This is the precise definition, see https://en.wikipedia.org/wiki/Astronomical_unit)\nUNIT_TIME = T_EARTH # 1 year in s/year\nUNIT_VELOCITY = 4.7403885 # km/s (or 1 AU/y in km/s)\nUNIT_VELOCITY2 = UNIT_LENGTH / (UNIT_TIME * DAY) # km/s (just a check)\n\n\n############### DERIVED BOUNDARY CONDITIONS ###############\n\n# Initial orbit (Earth)\nLEO_RADIUS = EARTH_RADIUS + EARTH_ALTITUDE # km\nLEO_VELOCITY = sqrt(G * EARTH_MASS / (LEO_RADIUS)) # km/s\n\n# Target orbit (Moon)\nLLO_RADIUS = LUNAR_RADIUS + LUNAR_ALTITUDE # km\nLLO_VELOCITY = sqrt(G * LUNAR_MASS / (LLO_RADIUS)) # km/s\n\n# Stationary Sun\nSUN_R = 0\nSUN_THETA = 45 # radians: pi / 4\nSUN_PHI = 0\n\n############### NONDIMENSIONALIZATION ###############\n\n# Nondimensionalized boundary conditions\nLEO_RADIUS_NONDIM = LEO_RADIUS / UNIT_LENGTH # dimless\nLEO_VELOCITY_NONDIM = LEO_VELOCITY / UNIT_VELOCITY # dimless\n\n# Nondimensionalized standard gravitational parameters\nSUN_ETA = UNIT_TIME ** 2 / UNIT_LENGTH ** 3 * SUN_MU\nEARTH_ETA = UNIT_TIME ** 2 / UNIT_LENGTH ** 3 * EARTH_MU\nMARS_ETA = UNIT_TIME ** 2 
/ UNIT_LENGTH ** 3 * MARS_MU\n\n\ndef update_constants_json():\n \"\"\" Write constant to constants.json file in same directory\"\"\"\n\n # Write constants to text file\n constants_dict = {\n ############### SIMULATION CONSTANTS ###############\n \"EARTH_ALTITUDE\": EARTH_ALTITUDE,\n \"LUNAR_ALTITUDE\": LUNAR_ALTITUDE,\n \"ORBITAL_TOLERANCE\": ORBITAL_TOLERANCE,\n \"h_DEFAULT\": h_DEFAULT,\n \"h_MIN\": h_MIN_DEFAULT,\n \"STEP_ERROR_TOLERANCE\": STEP_ERROR_TOLERANCE,\n ############### CHARACTERISTIC UNITS ###############\n \"UNIT_LENGTH\": UNIT_LENGTH,\n \"UNIT_TIME\": UNIT_TIME,\n \"UNIT_VELOCITY\": UNIT_VELOCITY,\n \"UNIT_VELOCITY2\": UNIT_VELOCITY2,\n ############### DERIVED BOUNDARY CONDITIONS ##################\n \"LEO_RADIUS\": LEO_RADIUS,\n \"LEO_VELOCITY\": LEO_VELOCITY,\n \"LLO_RADIUS\": LLO_RADIUS,\n \"LLO_VELOCITY\": LLO_VELOCITY,\n \"SUN_R\": SUN_R,\n \"SUN_THETA\": SUN_THETA,\n \"SUN_PHI\": SUN_PHI,\n ############### NONDIMENSIONALIZATION ###############\n \"LEO_RADIUS_NONDIM\": LEO_RADIUS_NONDIM,\n \"LEO_VELOCITY_NONDIM\": LEO_VELOCITY_NONDIM,\n \"SUN_ETA\": SUN_ETA,\n \"EARTH_ETA\": EARTH_ETA,\n \"MARS_ETA\": MARS_ETA,\n }\n\n orbsim_path = os.path.dirname(os.path.abspath(__file__))\n\n with open(orbsim_path + \"/constants.json\", \"w\", newline=\"\\n\") as file:\n file.write(json.dumps(constants_dict, indent=2))\n\n\nif __name__ == \"__main__\":\n update_constants_json()\n" }, { "alpha_fraction": 0.6734142899513245, "alphanum_fraction": 0.7176113128662109, "avg_line_length": 58.2400016784668, "blob_id": "a163d965c18335fdd040b5f65fff49831d443155", "content_id": "91c94c536b2fee99af5a246ce4c6f699eb5d5591", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2964, "license_type": "permissive", "max_line_length": 165, "num_lines": 50, "path": "/code/orbsim/r4b_3d/ephemerides/ssd-jpl-horizon-settings.md", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "# NASA JPL SSD Horizons Web-Interface Settings\n\n## Current Settings\n\nEphemeris Type [change] : \tOBSERVER\n**Target Body [change] : \t \tEarth [Geocenter] [399] OR Mars [499]**\n**Observer Location [change] : \tSun (body center) [500@10]**\n**Time Span [change] : \tStart=2019-01-01, Stop=3019-01-01, Step=1 d**\n**Table Settings [change] : \tQUANTITIES=1,3,16-20,22,28,31,33,41; date/time format=BOTH; time digits=FRACSEC; angle format=DEG; extra precision=YES; CSV format=YES**\n**Display/Output [change] : \tdownload/save (plain text file)**\n\n_NOTE:_\nyear 2265 chosen due to the output lines are limited. Choosing e.g. 2019-3019 gave error:\n`Projected output length (~365243) exceeds 90024 line max -- change step-size`\n\nSo we chose:\n`2019+90024/365 = 2,265.64 = 2265`\n\nIt also turns out that Mars ephemerides [only goes](https://ssd.jpl.nasa.gov/eph_spans.cgi?id=A) to year 2500 Jan 04, in Horizon.\n\n## Table Settings\n\nSee \"Current Settings\" above for enabled quantities. Many quantities are not used to included anyway due to being of potential interest.\n\n**Boldface:** changed from default\n\nNOTE the following quantities were `n.a.` even though they could've been of potential interest:\n- 27.\tSun-Target radial & -vel pos. 
angle\n- 36.\tRA & DEC uncertainty\n- 39.\tRange & range-rate 3-sigmas\n\n### Optional observer-table settings:\n**date/time format : both -- display date/time in year-month-day and/or Julian-day format **\n**time digits : fractional seconds (HH:MM:SS.SSS) -- controls output precision of time **\n**angle format : decimal degrees -- select RA/Dec output format **\noutput units : km & km/s -- units for most output quantities \nrange units : astronomical units -- units for range-type quantities \nrefraction model : airless model (no refraction) -- select atmospheric refraction model \nairmass cut-off :-- suppress output when airmass is greater than limit [1 to 38] \nelevation cutoff : [EMPTY] (deg) -- suppress output when object elevation is less than limit [-90 to 90] \nsolar elong. cut-off : [EMPTY] - [EMPTY] (deg) -- suppress output when solar elongation is outside (min,max) range [0 to 180, min to 180]\nhour angle cutoff : [EMPTY] (h) -- suppress output when the local hour angle (LHA) exceeds value [0 to 12] \nangular rate cutoff : [EMPTY] (arcsec/h) -- suppress output when the RA/Dec angular rate exceeds this value [0 to 100000] \nsuppress range-rate : [UNCHECKED] -- suppress range-rate for range/range-rate output \nskip daylight : [UNCHECKED] -- suppress output during daylight \n**extra precision : [CHECKED] -- output addition digits for RA/Dec quantities **\nRTS flag : [DISABLE] -- output data only at target rise/transit/set (RTS) \nreference system : ICRF/J2000.0\t -- reference frame for geometric and astrometric quantities \n**CSV format : [CHECKED] -- output data in Comma-Separated-Values (CSV) format **\nobject page : [CHECKED] -- include object information/data page on output \n" }, { "alpha_fraction": 0.6008597612380981, "alphanum_fraction": 0.6220238208770752, "avg_line_length": 26.743120193481445, "blob_id": "38c6dcda75d61eb855d920d3a4c5295cb9dd8c7b", "content_id": "873ccf7e7d0a12182a1ad6d54762f942d29db8ad", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3024, "license_type": "permissive", "max_line_length": 91, "num_lines": 109, "path": "/code/orbsim/r4b_3d/equations_of_physics.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nAll physics equations not related to Hamilton's equations (equations of motion).\n\n- Closed circular\n - orbital speed\n - period\n\"\"\"\n\nfrom math import pi, sqrt\n\nimport logging\n\nfrom orbsim import EARTH_RADIUS, MARS_RADIUS, SUN_RADIUS\nfrom orbsim.r4b_3d import EARTH_MU, MARS_MU, SUN_MU, UNIT_LENGTH, UNIT_TIME\n\n# from orbsim.r4b_3d.coordinate_system import (\n# get_position_spherical_from_cartesian,\n# get_speed_spherical,\n# get_velocity_spherical_from_cartesian,\n# )\n# from orbsim.r4b_3d.ephemerides import get_ephemerides, get_ephemerides_on_day\n\n# region Circular Orbit\ndef get_circular_orbit_speed(body=\"Earth\", altitude=160):\n \"\"\" Get speed of LEO (Low Earth Orbit) at designated altitude.\n\n Keyword Arguments:\n altitude {int} -- distance above Earth surface in km (default: {160})\n\n Returns:\n [int] -- speed in km/s.\n \"\"\"\n\n if body == \"Sun\":\n mu = SUN_MU\n radius = SUN_RADIUS\n elif body == \"Earth\":\n mu = EARTH_MU\n radius = EARTH_RADIUS\n elif body == \"Mars\":\n mu = MARS_MU\n radius = MARS_RADIUS\n\n v = sqrt(mu / (radius + altitude))\n\n v_au_y = v / (UNIT_LENGTH / UNIT_TIME)\n\n logging.debug(\n f\"Circular orbital speed around {body} at {altitude} km altitude (km/s): {v}\"\n f\" (Initial 160 km LEO expected 
speed: 7.812 km/s (via Wolfram Alpha))\"\n # https://www.wolframalpha.com/input/?i=7.812+km%2Fs+in+au%2Fy\n )\n logging.debug(\n f\"Circular orbital speed around {body} at {altitude} km altitude (AU/y):\"\n f\" {v_au_y}\"\n f\" (Initial 160 km LEO expected speed: 1.6468 au/y (via Wolfram Alpha))\"\n # https://www.wolframalpha.com/input/?i=circular+orbital+speed+earth+altitude+160km\n )\n\n return v\n\n\ndef get_circular_orbit_period(body=\"Earth\", altitude=160):\n \"\"\" Get period of LEO (Low Earth Orbit) at designated altitude.\n\n Keyword Arguments:\n altitude {int} -- distance above Earth surface in km (default: {160})\n\n Returns:\n [int] -- period in s.\n \"\"\"\n\n if body == \"Sun\":\n mu = SUN_MU\n radius = SUN_RADIUS\n elif body == \"Earth\":\n mu = EARTH_MU\n radius = EARTH_RADIUS\n elif body == \"Mars\":\n mu = MARS_MU\n radius = MARS_RADIUS\n\n T = 2 * pi * sqrt((radius + altitude) ** 3 / (mu))\n\n T_y = T * UNIT_TIME\n\n logging.debug(\n f\"Circular orbital period around {body} at {altitude} km altitude (s): {T}\"\n f\" (Initial 160 km LEO expected period: 5261 s (via Wolfram Alpha)\"\n )\n # https://www.wolframalpha.com/input/?i=circular+orbital+period+earth+altitude+160km\n\n logging.debug(\n f\"Circular orbital period around {body} at {altitude} km altitude (h): {T_y}\"\n f\" (Initial 160 km LEO expected period: 1.461 h (Via Wolfram Alpha)\"\n )\n # https://www.wolframalpha.com/input/?i=circular+orbital+period+earth+altitude+160km\n\n return T\n\n\n# endregion\n\n\nif __name__ == \"__main__\":\n\n get_circular_orbit_speed()\n\n get_circular_orbit_period()\n" }, { "alpha_fraction": 0.5867313742637634, "alphanum_fraction": 0.6023140549659729, "avg_line_length": 41.86800003051758, "blob_id": "80fd796d42154e7966136703a56c22544a43c174", "content_id": "db3e02603e8452d7fa13bf4b9f59170e5b275bd4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10717, "license_type": "permissive", "max_line_length": 142, "num_lines": 250, "path": "/code/marscudasim/initial_conditions.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"Functions that calculate various useful initial conditions for the R4B simulator.\"\"\"\n\nimport logging\nfrom math import degrees, radians, floor, pi\n\nimport numpy as np\nfrom numpy import cos, sin, sqrt, tan\n\nfrom orbsim import EARTH_RADIUS, SUN_RADIUS, DAY\nfrom orbsim.r4b_3d import UNIT_LENGTH, UNIT_TIME, UNIT_VELOCITY\nfrom orbsim.r4b_3d.coordinate_system import (\n get_position_spherical_from_cartesian,\n get_position_cartesian_from_spherical,\n get_speed_spherical,\n get_speed_cartesian,\n get_velocity_spherical_from_cartesian,\n get_distance_spherical\n)\nfrom new_ephemerides import get_ephemerides, get_ephemerides_on_day\nfrom orbsim.r4b_3d.equations_of_physics import get_circular_orbit_speed\n\nfrom orbsim.r4b_3d.equations_of_motion import get_B_R, get_B_theta, get_B_phi\n\nfrom ctypes import *\n\ncudasim = cdll.LoadLibrary(\"./libcudasim.so\")\n\ndef rotate(v, k, theta):\n cos_theta = cos(theta)\n sin_theta = sin(theta)\n\n rotated = (v * cos_theta) + (np.cross(k, v) * sin_theta) + (k * np.dot(k, v)) * (1 - cos_theta)\n return rotated\n\ndef get_leo_positions_and_velocities(days=[0], burndvs=[0], tilts=[0], h = 0.1 / UNIT_TIME, altitude=160, max_year=\"2020\"):\n hday = h * UNIT_TIME / DAY;\n earth_aphelion_AU = 1.0167\n earth_perihelion_AU = 0.98329\n #days[1] = 365.256363004\n\n #earth_a_AU = (earth_aphelion_AU + earth_perihelion_AU) / 2\n #earth_f_AU = 
earth_aphelion_AU - earth_a_AU\n #earth_b_AU = sqrt(earth_a_AU * earth_a_AU - earth_f_AU * earth_f_AU)\n\n ephemerides = get_ephemerides(max_year=max_year)\n\n day0s = np.zeros((len(days) * len(burndvs) * len(tilts)))\n Q0s = np.zeros((len(days) * len(burndvs) * len(tilts), 3))\n B0s = np.zeros((len(days) * len(burndvs) * len(tilts), 3))\n\n for n in range(len(days) * len(burndvs) * len(tilts)):\n\n i = int(floor(n / (len(burndvs) * len(tilts))))\n m = n % (len(burndvs) * len(tilts))\n j = int(floor(m / len(tilts)))\n k = m % len(tilts)\n\n day0s[n] = days[i]\n\n eph_day = get_ephemerides_on_day(ephemerides, days[i])\n\n sun_day = eph_day[\"sun\"]\n s_spherical = np.array([sun_day[\"r\"], radians(sun_day[\"theta\"]), radians(sun_day[\"phi\"])])\n\n mars_day = eph_day[\"mars\"]\n m_spherical = np.array([mars_day[\"r\"], radians(mars_day[\"theta\"]), radians(mars_day[\"phi\"])])\n\n earth_day = eph_day[\"earth\"]\n e_spherical = np.array([earth_day[\"r\"], radians(earth_day[\"theta\"]), radians(earth_day[\"phi\"])])\n e_cartesian = np.array(get_position_cartesian_from_spherical(e_spherical[0], e_spherical[1], e_spherical[2]))\n e_cartesian_unit = e_cartesian / np.linalg.norm(e_cartesian)\n\n eph_day0 = get_ephemerides_on_day(ephemerides, floor(days[i]))\n earth_day0 = eph_day0[\"earth\"]\n e_cartesian0 = np.array([earth_day0[\"x\"], earth_day0[\"y\"], earth_day0[\"z\"]])\n\n eph_day1 = get_ephemerides_on_day(ephemerides, floor(days[i] + 1))\n earth_day1 = eph_day1[\"earth\"]\n e_cartesian1 = np.array([earth_day1[\"x\"], earth_day1[\"y\"], earth_day1[\"z\"]])\n\n ev_cartesian = (e_cartesian1 - e_cartesian0) * UNIT_TIME / DAY\n ev_cartesian_unit = ev_cartesian / np.linalg.norm(ev_cartesian)\n ev_spherical = np.array(get_velocity_spherical_from_cartesian(e_cartesian, ev_cartesian))\n\n e_orbital_plane_cartesian = np.cross(e_cartesian, ev_cartesian)\n e_orbital_plane_cartesian /= np.linalg.norm(e_orbital_plane_cartesian)\n\n\n c_leo_cartesian = -1.0 * np.cross(e_orbital_plane_cartesian, ev_cartesian)\n c_leo_cartesian_unit = c_leo_cartesian / np.linalg.norm(c_leo_cartesian)\n\n c_cartesian = c_leo_cartesian_unit * (EARTH_RADIUS + altitude) / UNIT_LENGTH + e_cartesian\n c_spherical = np.array(get_position_spherical_from_cartesian(c_cartesian[0], c_cartesian[1], c_cartesian[2]))\n\n e_speed = UNIT_VELOCITY * get_speed_cartesian(ev_cartesian[0], ev_cartesian[1], ev_cartesian[2])\n leo_speed = get_circular_orbit_speed(\"Earth\", altitude)\n burn_speed = burndvs[j]\n #cv_cartesian = (ev_cartesian_unit * (e_speed + leo_speed)) / UNIT_VELOCITY\n\n leo_cartesian_unit = rotate(ev_cartesian_unit, c_leo_cartesian_unit, tilts[k])\n burn_cartesian_unit = rotate(ev_cartesian_unit, e_orbital_plane_cartesian, 0)\n burn_cartesian = burn_cartesian_unit * burndvs[j] / UNIT_VELOCITY\n #cv_spherical_unit = np.array(get_position_spherical_from_cartesian(ev_cartesian_unit[0], ev_cartesian_unit[1], ev_cartesian_unit[2]))\n #cv_cartesian_unit = np.array(get_position_cartesian_from_spherical(cv_spherical_unit[0], cv_spherical_unit[1], cv_spherical_unit[2]))\n\n cv_cartesian = (ev_cartesian_unit * e_speed + \n leo_cartesian_unit * leo_speed +\n leo_cartesian_unit * burndvs[j]) / UNIT_VELOCITY\n c_speed = UNIT_VELOCITY * get_speed_cartesian(cv_cartesian[0], cv_cartesian[1], cv_cartesian[2])\n cv_spherical = np.array(get_velocity_spherical_from_cartesian(c_cartesian, cv_cartesian))\n\n \"\"\"\n\n c_spherical = np.array([e_spherical[0] +\n (EARTH_RADIUS + altitude) / UNIT_LENGTH,\n e_spherical[1],\n e_spherical[2]]) \n 
c_cartesian = get_position_cartesian_from_spherical(c_spherical[0], c_spherical[1], c_spherical[2]);\n\n cv_leo_cartesian = np.cross(e_orbital_plane_cartesian, c_cartesian)\n cv_leo_cartesian_unit = cv_leo_cartesian / np.linalg.norm(cv_leo_cartesian)\n \n e_speed = UNIT_VELOCITY * get_speed_cartesian(ev_cartesian[0], ev_cartesian[1], ev_cartesian[2])\n leo_speed = get_circular_orbit_speed(\"Earth\", altitude)\n cv_cartesian = (ev_cartesian_unit * e_speed +\n cv_leo_cartesian_unit * leo_speed) / UNIT_VELOCITY\n c_speed = UNIT_VELOCITY * get_speed_cartesian(cv_cartesian[0], cv_cartesian[1], cv_cartesian[2])\n cv_spherical = np.array(get_velocity_spherical_from_cartesian(c_cartesian, cv_cartesian))\n\n \"\"\"\n e_s_distance = get_distance_spherical(e_spherical, s_spherical) * UNIT_LENGTH\n c_e_distance = get_distance_spherical(c_spherical, e_spherical) * UNIT_LENGTH\n c_m_distance = get_distance_spherical(c_spherical, m_spherical) * UNIT_LENGTH\n \"\"\"\n print(\"===========================\", n, i, j, k, days[i], \"===============================\")\n print(\"e_orbital_plane_cartesian=\", e_orbital_plane_cartesian)\n print(\"e_cartesian=\", e_cartesian)\n print(\"c_cartesian=\", c_cartesian)\n# print(\"e_s_distance=\", e_s_distance)\n print(\"c_e_distance=\", c_e_distance)\n# print(\"c_m_distance=\", c_m_distance)\n print(\"----------------------------------------------------\")\n print(\"tilt=\", tilts[k])\n print(\"ev_cartesian=\", ev_cartesian)\n print(\"cv_cartesian=\", cv_cartesian)\n #print(\"burn_cartesian=\", burn_cartesian)\n print(\"ev_cartesian_unit=\", ev_cartesian_unit)\n print(\"c_leo_cartesian_unit=\", c_leo_cartesian_unit)\n print(\"leo_cartesian_unit=\", leo_cartesian_unit)\n print(\"burn_cartesian_unit=\", burn_cartesian_unit)\n print(\"----------------------------------------------------\")\n print(\"e_speed=\", e_speed)\n print(\"leo_speed=\", leo_speed)\n print(\"burn_speed=\", burn_speed)\n print(\"c_speed=\", c_speed)\n print(\"----------------------------------------------------\")\n print(\"e_spherical=\", e_spherical)\n print(\"c_spherical=\", c_spherical)\n print(\"ev_spherical=\", ev_spherical)\n print(\"cv_spherical=\", cv_spherical)\n print(\"====================================================\")\n \"\"\"\n # FINAL OUTPUT: Initial coordinates (Q)\n Q0s[n] = c_spherical\n\n # FINAL OUTPUT: Initial momenta per mass (B)\n R, theta, _ = c_spherical\n Rdot, thetadot, phidot = cv_spherical\n\n B_R = get_B_R(Rdot)\n B_theta = get_B_theta(R, thetadot)\n B_phi = get_B_phi(R, theta, phidot)\n\n B0s[n] = np.array([B_R, B_theta, B_phi])\n\n return day0s, Q0s, B0s\n\ndef get_leo_positions_and_velocities_C(days=[0], burndvs=[0], tilts=[0], h = 0.1 / UNIT_TIME, altitude=160, max_year=\"2020\"):\n days = np.asarray(days)\n burndvs = np.asarray(burndvs)\n tilts = np.asarray(tilts)\n\n day0s = np.zeros((len(days) * len(burndvs) * len(tilts)))\n Q0s = np.zeros((len(days) * len(burndvs) * len(tilts), 3))\n B0s = np.zeros((len(days) * len(burndvs) * len(tilts), 3))\n\n ephemerides = get_ephemerides(max_year=max_year)\n earth = np.array(ephemerides['earth'])\n mars = np.array(ephemerides['mars'])\n earth_R = earth[:,3].astype(np.float64)\n earth_theta = earth[:,4].astype(np.float64) * pi / 180\n earth_phi = earth[:,5].astype(np.float64) * pi / 180\n mars_R = mars[:,3].astype(np.float64)\n mars_theta = mars[:,4].astype(np.float64) * pi / 180\n mars_phi = mars[:,5].astype(np.float64) * pi / 180\n\n cudasim.initial_conditions.restype = None\n cudasim.initial_conditions.argtypes = [\n 
c_int,\n POINTER(c_double),\n c_int,\n POINTER(c_double),\n c_int,\n POINTER(c_double),\n c_double,\n c_int,\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n ]\n\n days_ctype = days.ctypes.data_as(POINTER(c_double))\n burndvs_ctype = burndvs.ctypes.data_as(POINTER(c_double))\n tilts_ctype = tilts.ctypes.data_as(POINTER(c_double))\n day0s_ctype = day0s.ctypes.data_as(POINTER(c_double))\n Q0s_ctype = Q0s.ctypes.data_as(POINTER(c_double))\n B0s_ctype = B0s.ctypes.data_as(POINTER(c_double))\n earth_R_ctype = earth_R.ctypes.data_as(POINTER(c_double))\n earth_theta_ctype = earth_theta.ctypes.data_as(POINTER(c_double))\n earth_phi_ctype = earth_phi.ctypes.data_as(POINTER(c_double))\n mars_R_ctype = mars_R.ctypes.data_as(POINTER(c_double))\n mars_theta_ctype = mars_theta.ctypes.data_as(POINTER(c_double))\n mars_phi_ctype = mars_phi.ctypes.data_as(POINTER(c_double))\n\n cudasim.initial_conditions(\n int(days.size),\n days_ctype,\n int(burndvs.size),\n burndvs_ctype,\n int(tilts.size),\n tilts_ctype,\n altitude,\n int(earth_R.size),\n earth_R_ctype,\n earth_theta_ctype,\n earth_phi_ctype,\n mars_R_ctype,\n mars_theta_ctype,\n mars_phi_ctype,\n day0s_ctype,\n Q0s_ctype,\n B0s_ctype,\n )\n return day0s, Q0s, B0s\n" }, { "alpha_fraction": 0.7872340679168701, "alphanum_fraction": 0.8156028389930725, "avg_line_length": 69.5, "blob_id": "db4072f33a7e9e19d58d8a4ed8d0115ae85e4916", "content_id": "6fcb1af0d2146d591db05a54bf551f01af028378", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 141, "license_type": "permissive", "max_line_length": 130, "num_lines": 2, "path": "/README.md", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "# letomes\nMSc Thesis \"Low Energy Transfer Orbits from Earth using Evolution Strategies\" at the Technical University of Denmark, Spring 2018.\n" }, { "alpha_fraction": 0.548028290271759, "alphanum_fraction": 0.5735310912132263, "avg_line_length": 28.969696044921875, "blob_id": "120254ea617e7d1eec6a9ddd49e57282a9f54fd9", "content_id": "815019660a73a94c8d4737cc80c245c73445fbfc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8901, "license_type": "permissive", "max_line_length": 88, "num_lines": 297, "path": "/code/orbsim/r4b_3d/equations_of_motion.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nEquations of motion for R4B-3D system (Restricted 4-Body Problem in 3 Dimensions),\nderived via Hamiltons's equations.\n\nIncludes at least:\n- 3 Qdot: coordinate differential equations in spherical coordinates.\n- 3 Pdot: generalized momentum differential equations in spherical coordinates.\n- 3 P: generalized momentum as function of Qdot.\n- Hamiltonian\n\nAll non-dimensionalized and scaled with mass of spacecraft (see derivations in report)\n\"\"\"\n\nfrom math import pi\n\nimport numpy as np\nfrom numpy import cos, sin, sqrt, tan\n\nfrom orbsim.r4b_3d import EARTH_ETA, MARS_ETA, SUN_ETA\n\neta_ks = [SUN_ETA, EARTH_ETA, MARS_ETA]\n\n\n# region Coodinate Derivatives: Qdot(Q, B)\ndef get_Rdot(B_R):\n \"\"\"Rdot(R, theta, phi, B_R, B_theta, B_phi) from Hamilton's equations\"\"\"\n return B_R\n\n\ndef get_thetadot(R, B_theta):\n \"\"\"thetadot(R, theta, phi, B_R, B_theta, B_phi) from Hamilton's equations\"\"\"\n if R <= 0:\n raise ValueError(\"R must be positive.\")\n 
return B_theta / (R ** 2)\n\n\ndef get_phidot(R, theta, B_phi):\n \"\"\"phidot(R, theta, phi, B_R, B_theta, B_phi) from Hamilton's equations\"\"\"\n\n if R <= 0:\n raise ValueError(\"R cannot be less than or equal to zero.\")\n elif theta <= 0 or theta >= pi:\n raise ValueError(\"theta must be in range 0 < theta < pi.\")\n\n return B_phi / (R ** 2 * sin(theta) ** 2)\n\n\n# endregion\n\n# region Momentum Derivatives: Bdot(Q, B, Qk)\ndef get_Bdot_R(R, theta, phi, B_theta, B_phi, R_ks, theta_ks, phi_ks):\n \"\"\"\n Gives Bdot_R, i.e. the time derivative of the generalized momentum in R direction,\n per unit mass and in chosen characteristic units.\n\n Arguments:\n R {float} -- R coordinate (AU)\n theta {float} -- theta coordinate (rad)\n phi {float} -- phi coordinate (rad)\n B_theta {float} -- B_theta momentum (linear velocity in R direction, AU/y)\n B_phi {float} -- B_theta (angular theta momentum per mass, AU^2/y)\n R_ks {List(float)} -- Coordinates [R_sun, R_earth, R_mars], AU\n theta_ks {List(float)} -- Coordinates [theta_sun, theta_earth, theta_mars], rad\n phi_ks {List(float)} -- Coordinates [phi_sun, phi_earth, phi_mars], rad\n\n Raises:\n ValueError -- Out of range coordinates.\n\n Returns:\n float -- Bdot_R\n \"\"\"\n\n if R <= 0:\n raise ValueError(\"R cannot be less than or equal to zero.\")\n if theta <= 0 or theta >= pi:\n raise ValueError(\"theta must be in range 0 < theta < pi.\")\n if phi <= -pi or phi > pi:\n raise ValueError(\"phi must be in range -pi < phi <= pi.\")\n\n for R_k in R_ks:\n if R_k < 0:\n raise ValueError(\"All R_k must zero or be positive (allow for SUN_R = 0)\")\n for theta_k in theta_ks:\n if theta_k <= 0 or theta_k >= pi:\n raise ValueError(\"theta_k must be in range 0 < theta_k < pi.\")\n for phi_k in phi_ks:\n if phi_k <= -pi or phi_k > pi:\n raise ValueError(\"phi_k must be in range -pi < phi_k <= pi.\")\n\n R_ks = np.array(R_ks)\n theta_ks = np.array(theta_ks)\n phi_ks = np.array(phi_ks)\n\n numerators = eta_ks * (\n -R\n + R_ks\n * (cos(theta) * cos(theta_ks) + sin(theta) * sin(theta_ks) * cos(phi - phi_ks))\n )\n\n denominators_base = (\n R ** 2\n + R_ks ** 2\n - 2\n * R\n * R_ks\n * (cos(theta) * cos(theta_ks) + sin(theta) * sin(theta_ks) * cos(phi - phi_ks))\n )\n\n denominators = denominators_base * sqrt(denominators_base)\n\n summation = np.sum(numerators / denominators)\n\n Bdot_R1 = B_theta ** 2 / (R ** 3)\n Bdot_R2 = B_phi ** 2 / (R ** 3 * sin(theta) ** 2)\n Bdot_R3 = summation\n\n Bdot_R = Bdot_R1 + Bdot_R2 + Bdot_R3\n\n return Bdot_R\n\n\ndef get_Bdot_theta(R, theta, phi, B_phi, R_ks, theta_ks, phi_ks):\n \"\"\"\n Gives Bdot_theta, i.e. 
the time derivative of the generalized momentum in theta\n direction, per unit mass and in chosen characteristic units.\n\n Arguments:\n R {float} -- R coordinate (AU)\n theta {float} -- theta coordinate (rad)\n phi {float} -- phi coordinate (rad)\n B_phi {float} -- B_theta (angular theta momentum per mass, AU^2/y)\n R_ks {List(float)} -- Coordinates [R_sun, R_earth, R_mars], AU\n theta_ks {List(float)} -- Coordinates [theta_sun, theta_earth, theta_mars], rad\n phi_ks {List(float)} -- Coordinates [phi_sun, phi_earth, phi_mars], rad\n\n Raises:\n ValueError -- Out of range coordinates.\n\n Returns:\n float -- Bdot_theta\n \"\"\"\n\n if R <= 0:\n raise ValueError(\"R cannot be less than or equal to zero.\")\n if theta <= 0 or theta >= pi:\n raise ValueError(\"theta must be in range 0 < theta < pi.\")\n if phi <= -pi or phi > pi:\n raise ValueError(\"phi must be in range -pi < phi <= pi.\")\n\n for R_k in R_ks:\n if R_k < 0:\n raise ValueError(\"All R_k must zero or be positive (allow for SUN_R = 0)\")\n for theta_k in theta_ks:\n if theta_k <= 0 or theta_k >= pi:\n raise ValueError(\"theta_k must be in range 0 < theta_k < pi.\")\n for phi_k in phi_ks:\n if phi_k <= -pi or phi_k > pi:\n raise ValueError(\"phi_k must be in range -pi < phi_k <= pi.\")\n\n R_ks = np.array(R_ks)\n theta_ks = np.array(theta_ks)\n phi_ks = np.array(phi_ks)\n\n numerators = eta_ks * (\n R\n * R_ks\n * (-sin(theta) * cos(theta_ks) + cos(theta) * sin(theta_ks) * cos(phi - phi_ks))\n )\n\n denominators_base = (\n R ** 2\n + R_ks ** 2\n - 2\n * R\n * R_ks\n * (cos(theta) * cos(theta_ks) + sin(theta) * sin(theta_ks) * cos(phi - phi_ks))\n )\n\n denominators = denominators_base * sqrt(denominators_base)\n\n summation = np.sum(numerators / denominators)\n\n Bdot_theta1 = B_phi ** 2 / (R ** 2 * sin(theta) ** 2 * tan(theta))\n Bdot_theta2 = summation\n\n Bdot_theta = Bdot_theta1 + Bdot_theta2\n\n return Bdot_theta\n\n\ndef get_Bdot_phi(R, theta, phi, R_ks, theta_ks, phi_ks):\n \"\"\"\n Gives Bdot_phi, i.e. 
the time derivative of the generalized momentum in theta\n direction, per unit mass and in chosen characteristic units.\n\n Arguments:\n R {float} -- R coordinate (AU)\n theta {float} -- theta coordinate (rad)\n phi {float} -- phi coordinate (rad)\n R_ks {List(float)} -- Coordinates [R_sun, R_earth, R_mars], AU\n theta_ks {List(float)} -- Coordinates [theta_sun, theta_earth, theta_mars], rad\n phi_ks {List(float)} -- Coordinates [phi_sun, phi_earth, phi_mars], rad\n\n Raises:\n ValueError -- Out of range coordinates.\n\n Returns:\n float -- Bdot_phi\n \"\"\"\n\n if R <= 0:\n raise ValueError(\"R cannot be less than or equal to zero.\")\n if theta <= 0 or theta >= pi:\n raise ValueError(\"theta must be in range 0 < theta < pi.\")\n if phi <= -pi or phi > pi:\n raise ValueError(\"phi must be in range -pi < phi <= pi.\")\n\n for R_k in R_ks:\n if R_k < 0:\n raise ValueError(\"All R_k must zero or be positive (allow for SUN_R = 0)\")\n for theta_k in theta_ks:\n if theta_k <= 0 or theta_k >= pi:\n raise ValueError(\"theta_k must be in range 0 < theta_k < pi.\")\n for phi_k in phi_ks:\n if phi_k <= -pi or phi_k > pi:\n raise ValueError(\"phi_k must be in range -pi < phi_k <= pi.\")\n\n R_ks = np.array(R_ks)\n theta_ks = np.array(theta_ks)\n phi_ks = np.array(phi_ks)\n\n numerators = eta_ks * (-R * R_ks * sin(theta) * sin(theta_ks) * sin(phi - phi_ks))\n\n denominators_base = (\n R ** 2\n + R_ks ** 2\n - 2\n * R\n * R_ks\n * (cos(theta) * cos(theta_ks) + sin(theta) * sin(theta_ks) * cos(phi - phi_ks))\n )\n\n denominators = denominators_base * sqrt(denominators_base)\n\n summation = np.sum(numerators / denominators)\n\n Bdot_phi = summation\n\n return Bdot_phi\n\n\n# endregion\n\n# region Momenta B(Q, Qdot) - Derived from Qdot(Q, B)\ndef get_B_R(Rdot):\n \"\"\"Get B_R from Q, Qdot\"\"\"\n\n return Rdot\n\n\ndef get_B_theta(R, thetadot):\n \"\"\"Get B_theta from Q, Qdot\"\"\"\n if R <= 0:\n raise ValueError(\"R cannot be less than or equal to zero.\")\n\n return R ** 2 * thetadot\n\n\ndef get_B_phi(R, theta, phidot):\n \"\"\"Get B_phi from Q, Qdot\"\"\"\n if R <= 0:\n raise ValueError(\"R cannot be less than or equal to zero.\")\n if theta <= 0 or theta >= pi:\n raise ValueError(\"theta must be in range 0 < theta < pi.\")\n\n return R ** 2 * sin(theta) ** 2 * phidot\n\n\n# endregion\n\n\n# if __name__ == \"__main__\":\n\n# from pprint import pprint\n\n# pprint(\n# get_Bdot_R(\n# 1.1,\n# 3.1315926535897933,\n# 3.141592653589793,\n# 0.2,\n# -0.1,\n# [0.0, 0.983580560001, 1.470582878522],\n# [0.013707783890401887, 1.1997429598510756, 1.264411333882953],\n# [0.0, 2.0274978713480216, 6.283185307179586],\n# )\n# )\n" }, { "alpha_fraction": 0.6839378476142883, "alphanum_fraction": 0.6839378476142883, "avg_line_length": 18.100000381469727, "blob_id": "59e1d7697b42de2ca7efa5b3be7214a11283cb18", "content_id": "0f4e4450772cbfcc10f62e2e1f318a7f8797be7b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 193, "license_type": "permissive", "max_line_length": 55, "num_lines": 10, "path": "/code/marscudasim/equations_of_physics.h", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"constants.h\"\n#include <math.h>\n\n__host__ __device__\ninline double get_circular_orbit_speed(double altitude)\n{\n return sqrt(EARTH_MU / (EARTH_RADIUS + altitude));\n}\n \n" }, { "alpha_fraction": 0.4621337652206421, "alphanum_fraction": 0.4766906201839447, "avg_line_length": 31.497005462646484, "blob_id": 
"bc99fbeabdddfd390878f3a574fff8626dd4f060", "content_id": "dc9447eb5c141f67e50fc9557bb0ec2dd21feaae", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5427, "license_type": "permissive", "max_line_length": 397, "num_lines": 167, "path": "/code/r3b_bsc/symplectic.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nReduced 3-Body Problem Solver Module\n====================================\nA collection of various numerical solvers for the reduced 3-body problem consisting of two larger masses (Earth, Moon) and one smaller moving in their gravitational field (a satellite). The solution assumes Earth-Moon center of mass as origin and a cartesian x-y coordinate system rotating with the lines connecting the Earth and Moon (non-inertial frame accounted for in the equations of motion).\n\nFunctions:\n euler: Solves by Euler method explicitly, implicitly or symplectically.\n\nWe assume TODO: FILL OUT HERE!\n\n\"\"\"\n\nfrom math import pi, sqrt\n\nimport numpy as np\nfrom numba import jit\n\nfrom orbsim import DAY, EARTH_RADIUS\nfrom orbsim.r3b_2d import (\n ORBITAL_TOLERANCE,\n EARTH_POSITION_X,\n k,\n LLO_RADIUS,\n LLO_VELOCITY,\n LUNAR_POSITION_X,\n UNIT_LENGTH,\n UNIT_VELOCITY,\n h_DEFAULT,\n h_MIN_DEFAULT,\n STEP_ERROR_TOLERANCE,\n)\nfrom orbsim.r3b_2d.analyticals import get_pdot_x, get_pdot_y, get_v_x, get_v_y\nfrom orbsim.r3b_2d.integrators import euler_step_symplectic, verlet_step_symplectic\n\n\n@jit\ndef symplectic(\n n, duration, x, y, p_x, p_y, xs, ys, p_xs, p_ys, step_errors, h_list, info\n):\n # Initialize values\n h = h_DEFAULT\n h_min = h_MIN_DEFAULT\n # STEP_ERROR_TOLERANCE = STEP_ERROR_TOLERANCE\n\n # max_steps = duration\n step_error = 1e-15\n status = 1\n target_dist = 1\n target = 1\n target_pos_x = LUNAR_POSITION_X\n # target = 2; target_pos_x = L1_position_x\n target_pos_y = 0\n\n # Time reset\n t = 0\n for i in range(n):\n\n # Store position\n xs[i] = x\n ys[i] = y\n p_xs[i] = p_x\n p_ys[i] = p_y\n step_errors[i] = step_error\n h_list[i] = h\n\n # Integrate time period\n dt = duration * (i + 1) / n\n count = 0\n while t < dt:\n # Safety on iterations\n count += 1\n if count > 10000000:\n count = 0\n h_min = 2 * h_min\n\n # Adaptive symplectic euler/midpoint\n x1, y1, p1_x, p1_y = euler_step_symplectic(h, x, y, p_x, p_y)\n x2, y2, p2_x, p2_y = verlet_step_symplectic(h, x, y, p_x, p_y)\n\n # Relative local error of step\n step_error = sqrt(\n (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) / (x2 * x2 + y2 * y2)\n )\n\n # Accept the step only if the weighted error is no more than the\n # tolerance STEP_ERROR_TOLERANCE. 
Estimate an h that will yield an error of STEP_ERROR_TOLERANCE on\n            # the next step and use 0.8 of this value to avoid failures.\n            if step_error < STEP_ERROR_TOLERANCE or h <= h_min:\n\n                # Accept step\n                x = x2\n                y = y2\n                p_x = p2_x\n                p_y = p2_y\n\n                # Forward time by step\n                t = t + h\n                h = max(\n                    h_min, h * max(0.1, 0.8 * sqrt(STEP_ERROR_TOLERANCE / step_error))\n                )\n\n            else:\n                # No accept, reduce h to half\n                h = max(h_min, 0.5 * h)\n\n            # How close are we to the moon?\n            rx = x - target_pos_x\n            ry = y - target_pos_y\n            r = sqrt(rx * rx + ry * ry)\n            target_dist = min(target_dist, r)\n\n            # Check if we hit the target\n            if status == 1:\n                if target == 1:\n                    r_low = (LLO_RADIUS - ORBITAL_TOLERANCE) / UNIT_LENGTH\n                    r_high = (LLO_RADIUS + ORBITAL_TOLERANCE) / UNIT_LENGTH\n                else:\n                    r_low = 0\n                    r_high = ORBITAL_TOLERANCE / UNIT_LENGTH\n\n                if r > r_low and r < r_high:\n\n                    # Current velocity\n                    v_x = p_x + y\n                    v_y = p_y - x\n\n                    if target == 1:\n\n                        # Project velocity onto radius vector and subtract\n                        # so velocity vector is along orbit\n                        vr = (v_x * rx + v_y * ry) / r  ## FIXME: Check if vr is correct\n                        v_x = v_x - vr * rx / r\n                        v_y = v_y - vr * ry / r\n\n                        # Now adjust velocity to lunar orbit velocity\n                        vt = sqrt(v_x * v_x + v_y * v_y)\n                        p_x = (LLO_VELOCITY / UNIT_VELOCITY) * v_x / vt - y\n                        p_y = (LLO_VELOCITY / UNIT_VELOCITY) * v_y / vt + x\n\n                        # Total velocity change\n                        dv = sqrt(\n                            vr * vr\n                            + (vt - LLO_VELOCITY / UNIT_VELOCITY)\n                            * (vt - LLO_VELOCITY / UNIT_VELOCITY)\n                        )\n                    else:\n                        dv = sqrt(v_x * v_x + v_y * v_y)\n\n                    # Store info\n                    info[0] = dv\n                    info[1] = t\n\n                    # Finish?\n                    status = -10000 + dv\n                    if n == 1:\n                        return status\n\n            # Check if we hit the earth\n            r = (x - EARTH_POSITION_X) * (x - EARTH_POSITION_X) + y * y  # FIXME: sqrt?\n            r_high = EARTH_RADIUS / UNIT_LENGTH\n            if r < r_high * r_high:\n                return 100  # Hit earth surface\n\n    if status >= 0:\n        status = target_dist\n\n    return status\n" }, { "alpha_fraction": 0.5324225425720215, "alphanum_fraction": 0.5626322627067566, "avg_line_length": 33.87919616699219, "blob_id": "ba82c5dc49a0d05fee426dffbd001237cd7a3e0e", "content_id": "95a90152d28efc2cae1af55fba9e8a17d3245ae4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5197, "license_type": "permissive", "max_line_length": 128, "num_lines": 149, "path": "/docker/examples/_ex4.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "import pykep as pk\n\n\nclass mga_lt_earth_mars_sundmann(object):\n\n    \"\"\"\n    This constructs a pagmo.problem object that represents a low-thrust transfer between Earth and Jupiter.\n    The interplanetary leg uses Sundman's variable. 
The decision vector\n contains [t0,T,sf,mf,Vx,Vy,Vz,[throttles]] in the following units: [mjd2000, days, days, kg, m/s,m/s,m/s, [non-dimensional]]\n \"\"\"\n\n def __init__(self, mass=1000, Tmax=0.1, Isp=2500, Vinf=3.0, nseg=20):\n # We define some data members (we use the double underscore to\n # indicate they are private)\n self.__earth = pk.planet.jpl_lp('earth')\n self.__mars = pk.planet.jpl_lp('jupiter')\n self.__sc = pk.sims_flanagan.spacecraft(mass, Tmax, Isp)\n self.__Vinf = Vinf * 1000\n # here we construct the trajectory leg in Sundman's variable t =\n # (r/10AU)^1.5 s\n self.__leg = pk.sims_flanagan.leg_s(\n nseg, 1.0 / (100 * pk.AU) ** 1.0, 1.0)\n self.__leg.set_mu(pk.SUN_MU)\n self.__leg.set_spacecraft(self.__sc)\n # This is needed to use the plotting function plot_sf_leg\n self.__leg.high_fidelity = False\n self.__nseg = nseg\n\n def get_nic(self):\n return self.__nseg + 1\n\n def get_nec(self):\n return 8\n\n def get_bounds(self):\n lb = [5000, 2400, 10000, self.__sc.mass / 10, -self.__Vinf, -\n self.__Vinf, -self.__Vinf] + [-1] * 3 * self.__nseg\n ub = [8000, 2500, 150000, self.__sc.mass, self.__Vinf,\n self.__Vinf, self.__Vinf] + [1] * 3 * self.__nseg\n return (lb, ub)\n\n def fitness(self, x):\n from pykep import epoch, AU, EARTH_VELOCITY, DAY2SEC\n from pykep.sims_flanagan import sc_state\n # This is the objective function\n objfun = [-x[3]]\n\n # And these are the constraints\n start = epoch(x[0])\n end = epoch(x[0] + x[1])\n\n r, v = self.__earth.eph(start)\n v = [a + b for a, b in zip(v, x[4:7])]\n x0 = sc_state(r, v, self.__sc.mass)\n\n r, v = self.__mars.eph(end)\n xe = sc_state(r, v, x[3])\n self.__leg.set(start, x0, x[-3 * self.__nseg:],\n end, xe, x[2] * DAY2SEC)\n v_inf_con = (x[4] * x[4] + x[5] * x[5] + x[6] * x[6] -\n self.__Vinf * self.__Vinf) / (EARTH_VELOCITY * EARTH_VELOCITY)\n try:\n constraints = list(self.__leg.mismatch_constraints(\n ) + self.__leg.throttles_constraints()) + [v_inf_con]\n except:\n print(\n \"warning: CANNOT EVALUATE constraints .... 
possible problem in the Taylor integration in the Sundmann variable\")\n constraints = (1e14,) * (8 + 1 + self.__nseg + 2)\n # We then scale all constraints to non-dimensional values\n constraints[0] /= AU\n constraints[1] /= AU\n constraints[2] /= AU\n constraints[3] /= EARTH_VELOCITY\n constraints[4] /= EARTH_VELOCITY\n constraints[5] /= EARTH_VELOCITY\n constraints[6] /= self.__sc.mass\n constraints[7] /= 365.25 * DAY2SEC\n return objfun + constraints\n\n # And this helps to visualize the trajectory\n def plot(self, x):\n import matplotlib as mpl\n from mpl_toolkits.mplot3d import Axes3D\n import matplotlib.pyplot as plt\n from pykep import epoch, AU, DAY2SEC\n from pykep.sims_flanagan import sc_state\n from pykep.orbit_plots import plot_planet, plot_sf_leg\n\n start = epoch(x[0])\n end = epoch(x[0] + x[1])\n\n r, v = self.__earth.eph(start)\n v = [a + b for a, b in zip(v, x[4:7])]\n x0 = sc_state(r, v, self.__sc.mass)\n\n r, v = self.__mars.eph(end)\n xe = sc_state(r, v, x[3])\n self.__leg.set(\n start, x0, x[-3 * self.__nseg:], end, xe, x[2] * DAY2SEC)\n\n fig = plt.figure()\n axis = fig.gca(projection='3d')\n # The Sun\n axis.scatter([0], [0], [0], color='y')\n # The leg\n plot_sf_leg(self.__leg, units=AU, N=10, ax=axis)\n # The planets\n plot_planet(\n self.__earth, start, units=AU, legend=True, color=(0.8, 0.8, 1), ax=axis)\n plot_planet(\n self.__mars, end, units=AU, legend=True, color=(0.8, 0.8, 1), ax=axis)\n plt.show()\n\n\n\"\"\"\nThis example demonstrates the use of the interplanetary leg in Sundman's variable to obtain automated mesh optimization\n\"\"\"\n\n\ndef run_example4():\n import pygmo as pg\n from pykep.examples import add_gradient, algo_factory\n\n N = 20\n\n # problem\n udp = add_gradient(mga_lt_earth_mars_sundmann(nseg=N), with_grad=False)\n prob = pg.problem(udp)\n prob.c_tol = [1e-5] * prob.get_nc()\n\n # algorithm\n uda = algo_factory(\"snopt7\", False)\n uda2 = pg.mbh(uda, 5, 0.05)\n algo = pg.algorithm(uda2)\n algo.set_verbosity(1)\n\n # 3 - Population\n pop = pg.population(prob, 1)\n\n # 4 - Solve the problem (evolve)\n print(\"Running Monotonic Basin Hopping ....\")\n pop = algo.evolve(pop)\n\n print(\"Is the solution found a feasible trajectory? 
\" +\n str(prob.feasibility_x(pop.champion_x)))\n udp.udp_inner.plot(pop.champion_x)\n\nif __name__ == \"__main__\":\n run_example4()\n" }, { "alpha_fraction": 0.5162907242774963, "alphanum_fraction": 0.5998328924179077, "avg_line_length": 27.843374252319336, "blob_id": "736935aa21650305ed72b0953549af6f8976923b", "content_id": "b0284a5916bc9a172e5b79ab4b917b7aae341a26", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2394, "license_type": "permissive", "max_line_length": 85, "num_lines": 83, "path": "/code/orbsim/__init__.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nConstants common to all simulations.\n\nUnless otherwise noted, all units will be in:\n- Mass: kg\n- Length: km\n- Time: days TODO: Change to seconds due to better fit with typical time step size\n\"\"\"\nimport json\nimport os\n\n############### TABLE / PHYSICAL CONSTANTS ###############\n# See physical_constants_and_units.nb, but can be looked up in any table / Wikipedia\n\n# Physical constants\n# G = 6.67384e-11 / 1000 ** 3 # kg^-1 km^3 s^-2\nG = 6.674080000000000335154563053e-11 / 1000 ** 3 # kg^-1 km^3 s^-2\nDAY = 24.0 * 3600.0 # s\n\n# Solar system quantities\na_EARTH = 1.495978871467657760097664981e8 # km (semi major axis of Earth's orbit)\nT_EARTH = 3.15581491022399999996634126e7 # s (orbital period of Earth)\n\n# Sun constants\nSUN_MASS = 1.988435e30 # kg\nSUN_RADIUS = 6.95700e5 # km\n\n# Earth constants\nEARTH_RADIUS = 6378.1 # km (equatorial)\nEARTH_MASS = 5.9721986e24 # kg\n# EARTH_MASS = 0\n\n# Lunar quantities\nLUNAR_RADIUS = 1737.1 # km\nLUNAR_MASS = 7.34767309e22 # kg\nEARTH_MOON_DISTANCE = 384400.0 # km\nLUNAR_ORBITAL_DURATION = 27.322 # days\n\n# Mars quantities\nMARS_RADIUS = 3396.2 # km (equatorial)\nMARS_MASS = 6.41693e23 # kg\n# MARS_MASS = 0\n\n\n############### DERIVED CONSTANTS ###############\nSUN_MU = G * SUN_MASS # km^-3 s^2\nEARTH_MU = G * EARTH_MASS # km^-3 s^2\nMARS_MU = G * MARS_MASS # km^-3 s^2\n\n\ndef update_constants_json():\n \"\"\" Write constants to constants.json file in same directory\"\"\"\n\n # Write constants to text file\n constants_dict = {\n ############### TABLE / PHYSICAL CONSTANTS ###############\n \"G\": G,\n \"DAY\": DAY,\n \"a_EARTH\": a_EARTH,\n \"T_EARTH\": T_EARTH,\n \"SUN_MASS\": SUN_MASS,\n \"EARTH_RADIUS\": EARTH_RADIUS,\n \"EARTH_MASS\": EARTH_MASS,\n \"LUNAR_RADIUS\": LUNAR_RADIUS,\n \"LUNAR_MASS\": LUNAR_MASS,\n \"EARTH_MOON_DISTANCE\": EARTH_MOON_DISTANCE,\n \"LUNAR_ORBIT_DURATION\": LUNAR_ORBITAL_DURATION,\n \"MARS_RADIUS\": MARS_RADIUS,\n \"MARS_MASS\": MARS_MASS,\n ############### DERIVED CONSTANTS ###############\n \"SUN_MU\": SUN_MU,\n \"EARTH_MU\": EARTH_MU,\n \"MARS_MU\": MARS_MU,\n }\n\n orbsim_path = os.path.dirname(os.path.abspath(__file__))\n\n with open(orbsim_path + \"/constants.json\", \"w\", newline=\"\\n\") as file:\n file.write(json.dumps(constants_dict, indent=2))\n\n\nif __name__ == \"__main__\":\n update_constants_json()\n" }, { "alpha_fraction": 0.4440678060054779, "alphanum_fraction": 0.4576271176338196, "avg_line_length": 21.69230842590332, "blob_id": "17964a162ee784b0537bb24884e1d703d4a8d4a4", "content_id": "973d4fdd1dfe45c6996168c4c17e1248aec0b3e4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 295, "license_type": "permissive", "max_line_length": 35, "num_lines": 13, "path": "/code/cudasim/symplectic.h", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": 
"#pragma once\n\n#include <cuda_runtime.h>\n\n__device__\nvoid symplectic(double x0,\n double y0,\n double p0_x,\n double p0_y,\n double maxDuration,\n double maxIter,\n double* score,\n bool* success);\n" }, { "alpha_fraction": 0.5350180268287659, "alphanum_fraction": 0.7906137108802795, "avg_line_length": 31.20930290222168, "blob_id": "a54d45717a7b41d27c358f58e9cecbc4b4de5aeb", "content_id": "ee295cccf04916ead16d857332386737bc4109d8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1385, "license_type": "permissive", "max_line_length": 58, "num_lines": 43, "path": "/code/cudasim/constants.h", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "#pragma once\n\n#define G 6.67384e-20\n#define DAY 86400.0\n#define a_EARTH 149597887.0\n#define T_EARTH 31558149.0\n#define SUN_MASS 1.988435e+30\n#define EARTH_RADIUS 6367.4447\n#define EARTH_MASS 5.9721986e+24\n#define LUNAR_RADIUS 1737.1\n#define LUNAR_MASS 7.34767309e+22\n#define EARTH_MOON_DISTANCE 384400.0\n#define LUNAR_ORBIT_DURATION 27.322\n#define MARS_RADIUS 3389.5\n#define MARS_MASS 6.41693e+23\n#define SUN_MU 132704970404.0\n#define EARTH_MU 398574.97904624\n#define MARS_MU 42825.5641112\n\n#define EARTH_ALTITUDE 160.0\n#define LUNAR_ALTITUDE 100.0\n#define ORBITAL_TOLERANCE 10\n#define h_DEFAULT 1e-06\n#define h_MIN 1e-10\n#define STEP_ERROR_TOLERANCE 1e-09\n#define K 0.012153601852294929\n#define LUNAR_POSITION_X 0.987846398147705\n#define EARTH_POSITION_X -0.012153601852294929\n#define L1_POSITION_X 0.8405854649886768\n#define LEO_RADIUS 6527.4447\n#define LLO_RADIUS 1837.1\n#define LEO_VELOCITY 7.8141800786375315\n#define LLO_VELOCITY 1.6337906619944857\n#define UNIT_LENGTH 384400.0\n#define UNIT_TIME 4.348431355156764\n#define UNIT_VELOCITY 1.0231446033517257\n#define LEO_RADIUS_NONDIM 0.016980865504682623\n#define LEO_VELOCITY_NONDIM 7.6374151347121515\n\n//#define LEO_VELOCITY sqrt(G * EARTH_MASS / (LEO_RADIUS))\n//#define LLO_VELOCITY sqrt(G * LUNAR_MASS / (LLO_RADIUS))\n//#define LEO_RADIUS_NONDIM LEO_RADIUS / UNIT_LENGTH\n//#define LEO_VELOCITY_NONDIM LEO_VELOCITY / UNIT_VELOCITY\n" }, { "alpha_fraction": 0.7246376872062683, "alphanum_fraction": 0.7246376872062683, "avg_line_length": 21.66666603088379, "blob_id": "1ada5c644fbedc3a54d775bd6b90467b9592b98d", "content_id": "285814283671c900d434eb192530ff2b11076fb7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 69, "license_type": "permissive", "max_line_length": 45, "num_lines": 3, "path": "/code/marscudasim/martian_cuda_rocketry.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "from ctypes import *\n\ncudasim = cdll.LoadLibrary(\"./libcudasim.so\")\n\n" }, { "alpha_fraction": 0.5220779180526733, "alphanum_fraction": 0.5555194616317749, "avg_line_length": 28.056604385375977, "blob_id": "ef1d0a15114bf26cdb1b633a23e98246ff193717", "content_id": "0eca867b5fd6542d5344ace3e9bc50a4b3271296", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3080, "license_type": "permissive", "max_line_length": 88, "num_lines": 106, "path": "/code/run_r4b.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"Run the R4B simulator\"\"\"\n\nimport logging\nimport pathlib\nimport sys\n\nfrom orbsim.r4b_3d import UNIT_TIME\n\nfrom orbsim.r4b_3d.initial_conditions import (\n get_circular_sun_orbit_position_and_velocity,\n 
get_leo_position_and_velocity,\n)\nfrom orbsim.r4b_3d.logging import logging_setup\nfrom orbsim.r4b_3d.mplotting import all_plots_r4b_orbitplot\nfrom orbsim.r4b_3d.simulation import simulate\n\nlogging_setup()\n\nlogger = logging.getLogger()\n\n\nif __name__ == \"__main__\":\n\n try:\n MODE = sys.argv[1]\n except IndexError:\n MODE = \"leo\" # <-- INPUT DEMO / SEARCH PARAMETER HERE\n\n mode_dict = {\n # Keys: Possible input arguments (argv)\n # Values: Output folder name of associated log/figs of run\n # Simple simulation without burn\n \"leo\": \"demo_leo\",\n \"sun\": \"demo_circular_sun_orbit\",\n \"mars\": \"demo_mars_transfer\",\n }\n\n MODE_NAME = mode_dict[MODE]\n OUTPUT_DIR = \"runs/r4b_3d/\" + MODE_NAME + \"/\"\n pathlib.Path(OUTPUT_DIR).mkdir(parents=True, exist_ok=True)\n\n # Run simulate() with some initial conditions\n if MODE_NAME == \"demo_leo\":\n # Simple LEO without burn\n day = 0\n max_year = \"2020\"\n h = 1 * 30 / UNIT_TIME\n max_duration = 3600 * 12 / UNIT_TIME\n max_iter = 1e6\n\n Q0, B0 = get_leo_position_and_velocity(day=day, altitude=160, max_year=max_year)\n ts, Qs, Bs, (\n t_final,\n i_final,\n ), ephemerides, eph_body_coords, body_distances = simulate(\n psi=(day, Q0, B0, None),\n max_year=max_year,\n h=h,\n max_duration=max_duration,\n max_iter=max_iter,\n )\n\n elif MODE_NAME == \"demo_circular_sun_orbit\":\n # Simple circular orbit around sun, pos (1,0,0) AU, unit vel (0,1,0)\n day = 0\n max_year = \"2039\"\n h = 3600 * 24 / UNIT_TIME\n max_duration = 1\n max_iter = 1e6\n\n Q0, B0 = get_circular_sun_orbit_position_and_velocity()\n ts, Qs, Bs, (\n t_final,\n i_final,\n ), ephemerides, eph_body_coords, body_distances = simulate(\n psi=(day, Q0, B0, None),\n max_year=max_year,\n h=h,\n max_duration=max_duration,\n max_iter=max_iter,\n )\n\n elif MODE_NAME == \"demo_mars_transfer\":\n # # Hohmann transfer orbit to Mars\n day = 50\n max_year = \"2039\"\n h = 60 / UNIT_TIME\n # max_duration = 3600 * 24 * 300 / UNIT_TIME\n max_duration = 3600 * 24 * 200 / UNIT_TIME\n max_iter = 1e6\n burn0 = 3.62 # burn delta-v in km/s\n\n Q0, B0 = get_leo_position_and_velocity(day=day, altitude=160, max_year=max_year)\n ts, Qs, Bs, (\n t_final,\n i_final,\n ), ephemerides, eph_body_coords, body_distances = simulate(\n psi=(day, Q0, B0, burn0),\n max_year=max_year,\n h=h,\n max_duration=max_duration,\n max_iter=max_iter,\n )\n\n # PLOT THINGS\n all_plots_r4b_orbitplot(Qs, ts, t_final, max_year)\n" }, { "alpha_fraction": 0.8026666641235352, "alphanum_fraction": 0.8100000023841858, "avg_line_length": 99, "blob_id": "0af9d5033556d3a3b58a235240fe3de649e4ecc5", "content_id": "9fc875e97500e1d36593ea2bf05fdd57121889d8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1501, "license_type": "permissive", "max_line_length": 997, "num_lines": 15, "path": "/thesis-description.md", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "# Thesis Title\n\n**EN:** Low Energy Transfer Orbits to Mars using Evolution Strategies\n\n**DA:** Lavenergetiske overførslesbaner til Mars med Evolution Strategies\n\n# Thesis description\n\nWhen navigating a spacecraft between various orbits, low energy transfer orbits (LETOs) allows us to make the trade-off of a longer flight time in return for lower propellant energy requirements, suitable for unmanned space missions. In 2015 we explored the limits of LETOs from Earth to Moon using a simple restricted 3-body model with just two engine burns and GPU accelerated simulations. 
These simulations were searching the space for LETOs in a brute force manner, shooting out from low earth orbit in many random directions and velocity vectors. To validate the model and simulation, we were able to recreate the parameters of the Hohmann transfer orbit of the Apollo missions to within 2.5% delta-v and found some LETOs with some of the lowest $\\Delta v$ values we have seen in the literature.\n\nNow we want to explore the use of more intelligent optimization strategies, in particular evolution strategies, for finding LETOs. We also want to expand the model to a restricted 4-body model, in order to explore LETOs to planet Mars.\n\n# Thesis plan\n\nhttps://my.mindnode.com/JyGhsgkywQz8SmzRoZKpjzc6KpiGcQPS8zvQgWVx\n" }, { "alpha_fraction": 0.43357470631599426, "alphanum_fraction": 0.4701777398586273, "avg_line_length": 32.16593933105469, "blob_id": "ad862d543014b60afa5dc377ec2cd69107c654d7", "content_id": "4c5e34757609316ebb771f5835d6fafbb472de51", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7595, "license_type": "permissive", "max_line_length": 140, "num_lines": 229, "path": "/code/marscudasim/run_multi_r4b.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"Run the R4B simulator\"\"\"\n\nimport logging\nimport pathlib\nimport sys\n\nfrom math import pi, floor\n\nimport numpy as np\nfrom numpy import arange\n\nfrom orbsim import DAY\nfrom orbsim.r4b_3d import UNIT_TIME\n\n#from orbsim.r4b_3d.initial_conditions import (\n#    get_leo_position_and_velocity,\n#    get_circular_sun_orbit_position_and_velocity,\n#)\nfrom marscudasim.initial_conditions import (\n    get_leo_positions_and_velocities,\n    get_leo_positions_and_velocities_C,\n)\nfrom orbsim.r4b_3d.logging import logging_setup\nfrom orbsim.r4b_3d.mplotting import all_plots_r4b_orbitplot\n#from orbsim.r4b_3d.simulators import simulate as simulate_ref\nfrom marscudasim.simulators import simulate, simulate_single\n\nlogging_setup()\n\nlogger = logging.getLogger()\n\n\nif __name__ == \"__main__\":\n\n    try:\n        MODE = sys.argv[1]\n    except IndexError:\n        MODE = \"multi\"  # <-- INPUT DEMO / SEARCH PARAMETER HERE\n\n    mode_dict = {\n        # Keys: Possible input arguments (argv)\n        # Values: Output folder name of associated log/figs of run\n        # Simple simulation without burn\n        \"leo\": \"demo_leo\",\n        \"sun\": \"demo_circular_sun_orbit\",\n        \"multi\": \"leo_multiple_trajectories\",\n        \"single\": \"leo_plot_single\",\n    }\n\n    MODE_NAME = mode_dict[MODE]\n    OUTPUT_DIR = \"results/r4b_3d/\" + MODE_NAME + \"/\"\n    pathlib.Path(OUTPUT_DIR).mkdir(parents=True, exist_ok=True)\n\n    # Run simulate() with some initial conditions\n    if MODE_NAME == \"demo_leo\":\n\n        # Simple LEO without burn\n        day = 0\n        max_year = \"2020\"\n        h = 60 / UNIT_TIME\n        max_duration = 3600 * 24 / UNIT_TIME\n        max_iter = 1e6\n\n        Q0, B0 = get_leo_position_and_velocity(day=day, altitude=160, max_year=max_year)\n        ts, Qs, Bs, (t_final, i_final), ephemerides = simulate(\n            psi=(day, Q0, B0, None),\n            max_year=max_year,\n            h=h,\n            max_duration=max_duration,\n            max_iter=max_iter,\n            number_of_paths=1,\n            fan_delta=0,\n            coordinate_no=0\n        )\n\n    elif MODE_NAME == \"demo_circular_sun_orbit\":\n\n        # Simple circular orbit around sun, pos (1,0,0) AU, unit vel (0,1,0)\n        day = 0\n        max_year = \"2039\"\n        h = 3600 * 24 / 
UNIT_TIME\n max_duration = 1\n max_iter = 1e6\n\n Q0, B0 = get_circular_sun_orbit_position_and_velocity()\n ts, Qs, Bs, (t_final, i_final), ephemerides = simulate(\n psi=(day, Q0, B0, None),\n max_year=max_year,\n h=h,\n max_duration=max_duration,\n max_iter=max_iter,\n number_of_paths=1,\n fan_delta=0,\n coordinate_no=0\n )\n\n elif MODE_NAME == \"leo_multiple_trajectories\":\n\n # Simple LEO without burn\n #day = 0.0\n days = np.linspace(0.5, 365 + 364.5, 2 * 365)\n burndvs = np.linspace(2.52, 3.51, 100)\n\n # First refinement search\n day = 310.5 + 6 * 687 - 80\n dday = 20\n burn = 3.86\n dburn = 0.2\n tilt = (15.0 / 180.0) * pi\n dtilt = (15.0 / 180.0) * pi \n\n # Second refinement search\n day = 4351.5\n dday = 1\n burn = 3.91\n dburn = 0.04\n tilt = 0.523598775598\n dtilt = 0.04\n\n # Specify search space\n days = np.linspace(day - dday, day + dday, 5)\n burndvs = np.linspace(burn - dburn, burn + dburn, 41)\n tilts = np.linspace(tilt - dtilt, tilt + dtilt, 81)\n\n max_year = \"2039\"\n h = 1 / UNIT_TIME # 0.1 seconds\n max_duration = 290 * DAY / UNIT_TIME\n max_iter = 100000000\n day0s, Q0s, B0s = get_leo_positions_and_velocities_C(days=days,\n burndvs=burndvs,\n tilts=tilts,\n h=h,\n altitude=160,\n max_year=max_year)\n\n logging.info(\"-------------------------------------------- CPU -------------------------------------------------------------------\")\n \"\"\"\n ts, Qs, Bs, (t_final, i_final), ephemerides = simulate_ref(\n psi=(day, Q0, B0, None),\n max_year=max_year,\n h=h,\n max_duration=max_duration,\n max_iter=max_iter,\n )\n \"\"\"\n logging.info(\"-------------------------------------------- GPU -------------------------------------------------------------------\")\n\n arives, scores = simulate(\n psi=(day0s, Q0s, B0s, None),\n max_year=max_year,\n h=h,\n max_duration=max_duration,\n max_iter=max_iter,\n )\n print(\"Total paths=\", len(scores))\n print(\"days=\", days)\n print(\"burndvs=\", burndvs)\n print(\"tilts=\", tilts)\n bestscores = np.sort(scores)\n bestidx = np.argsort(scores)\n bestarives = arives[bestidx]\n for s in range(min(len(scores), 20)):\n n = bestidx[s]\n i = int(floor(n / (len(burndvs) * len(tilts))))\n m = n % (len(burndvs) * len(tilts))\n j = int(floor(m / len(tilts)))\n k = m % len(tilts)\n print(\"pathNo=\", n,\n \"duration=\", bestarives[s] - days[i],\n \"dmars=\", bestscores[s], \"km\",\n \"| day=\", days[i],\n \"burndv=\", burndvs[j],\n \"tilt=\", tilts[k],\n \"Q=\", Q0s[n],\n \"B=\", B0s[n])\n \n #best = list([bestidx[0:10], bestarivees[0:10], bestscores[0:10]])\n #print(best[0])\n #print(best[1])\n #print(best[2])\n\n \"\"\"\n ts, Qs, Bs, (t_final, i_final), ephemerides = simulate(\n psi=(day, Q0, B0, None),\n max_year=max_year,\n h=h/2,\n max_duration=max_duration,\n max_iter=max_iter,\n number_of_paths=number_of_paths,\n fan_delta=fan_delta,\n coordinate_no=coordinate_no,\n )\n \"\"\"\n elif MODE_NAME == \"leo_plot_single\":\n\n # Simple LEO without burn\n #day = 0.0\n days = np.array([271.5])\n burndvs = np.array([3.51])\n days = np.array([518.5])\n burndvs = np.array([4.1])\n\n # Mars hit trajectory\n days = np.array([4351.5])\n burndvs = np.array([0 * 3.918])\n tilts = np.array([0.557598775598])\n\n max_year = \"2039\"\n h = 1 / UNIT_TIME # 0.1 seconds\n max_duration = 300 * DAY / UNIT_TIME\n max_iter = 100000000\n day0s, Q0s, B0s = get_leo_positions_and_velocities(days=days,\n burndvs=burndvs,\n tilts=tilts,\n h=h,\n altitude=160,\n max_year=max_year)\n ts, Qs, i_final = simulate_single(\n psi=(day0s, Q0s, B0s, None),\n 
max_year=max_year,\n        h=h,\n        max_duration=max_duration,\n        max_iter=max_iter,\n    )\n    # PLOT THINGS\n    i_final = i_final[0]\n    i_plot = np.linspace(0, i_final - 1, min(1000, i_final)).astype(int)\n    #print(i_final, UNIT_TIME * ts[i_final] / DAY, Qs[i_final])\n    all_plots_r4b_orbitplot(Qs[i_plot,:], ts[i_plot], ts[i_final], max_year)\n" }, { "alpha_fraction": 0.7278106212615967, "alphanum_fraction": 0.7721893787384033, "avg_line_length": 47, "blob_id": "8a0f0c5a69b4f4027ef8e9cb75af5a905fb7b78c", "content_id": "874aebf0fb960d7cbb3b258cc3ad056b936437cd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 338, "license_type": "permissive", "max_line_length": 168, "num_lines": 7, "path": "/interesting-papers.md", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "# Interesting papers\n\nA collection of references to articles that are *potentially* interesting / relevant for the LETOMES project, but not with enough confidence to import into Mendeley.\n\n\n\nJune 12, 2018: [Escape Trajectories from the L2 Point of the Earth-Moon System](https://www.jstage.jst.go.jp/article/tjsass/57/4/57_238/_pdf) \n\n" }, { "alpha_fraction": 0.5075140595436096, "alphanum_fraction": 0.5347526669502258, "avg_line_length": 23.288972854614258, "blob_id": "cd6d4a4de8f55a55f8205e9d9dfb4512a4748310", "content_id": "ce326d11c75f1b8dce0024984a99341039ed1460", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6388, "license_type": "permissive", "max_line_length": 87, "num_lines": 263, "path": "/code/orbsim/r4b_3d/coordinate_system.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nEquations related to the cartesian and spherical coordinate system.\n\n* Get unit vectors\n* Euclidean distances between two points\n* Position coordinate conversions\n* Velocity coordinate conversions\n* Speed from position and velocity\n\"\"\"\nfrom math import acos, atan2, cos, pi, sin, sqrt\n\n# region Keeping Angles in Intervals\n\n\ndef keep_theta_in_interval_zero_to_pi(v):\n    v = v % (2 * pi)\n\n    if v > pi:\n        v = 2 * pi - v\n\n    return v\n\n\ndef keep_phi_in_interval_npi_to_pi(v):\n    v = v % (2 * pi)\n\n    if v > pi:\n        v = v - 2 * pi\n\n    return v\n\n\n# endregion\n\n\n# region Distances\ndef get_distance_cartesian(u, v):\n    \"\"\"Get distance between two sets of cartesian coordinates by Pythagoras.\"\"\"\n    x1, y1, z1 = u\n    x2, y2, z2 = v\n\n    return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2 + (z2 - z1) ** 2)\n\n\ndef get_distance_spherical(u, v):\n    \"\"\"Get distance between two sets of spherical coordinates.\"\"\"\n    r1, theta1, phi1 = u\n    r2, theta2, phi2 = v\n\n    return sqrt(\n        r1 ** 2\n        + r2 ** 2\n        - 2\n        * r1\n        * r2\n        * (\n            cos(theta1) * cos(theta2)\n            + sin(theta1)\n            * sin(theta2)\n            * (cos(phi1) * cos(phi2) + sin(phi1) * sin(phi2))\n        )\n    )\n\n\n# endregion\n\n\n# region Position Coordinate Conversions\ndef get_position_cartesian_from_spherical(r, theta, phi):\n    \"\"\"\n    Get cartesian (x,y,z) coordinates from spherical (r, theta, phi) coordinates,\n    in the standard range r > 0, 0 < theta < pi, -pi < phi <= pi.\n\n    Arguments:\n        r {float} -- Radial coordinate (length of position vector)\n        theta {float} -- Theta angle (rad), angle from z-axis to position vector.\n        phi {float} -- Phi angle (rad), angle from x-axis to point projected to x-y\n                       plane.\n\n    Raises:\n        ValueError -- r cannot be zero (to ensure unique solution)\n        ValueError -- theta cannot be zero (to ensure unique 
solution)\n\n    Returns:\n        List[float] -- Cartesian [x, y, z] coordinates corresponding to spherical input\n                       coordinates.\n    \"\"\"\n    if r <= 0:\n        raise ValueError(\"r cannot be less than or equal to zero.\")\n    if theta <= 0 or theta >= pi:\n        raise ValueError(\"theta must be in range 0 < theta < pi.\")\n    #if phi <= -pi or phi > pi:\n    #    raise ValueError(\"phi must be in range -pi < phi <= pi.\")\n\n    x = r * sin(theta) * cos(phi)\n    y = r * sin(theta) * sin(phi)\n    z = r * cos(theta)\n\n    return x, y, z\n\n\ndef get_position_spherical_from_cartesian(x, y, z):\n    \"\"\"Get spherical (r, theta, phi) coordinates from cartesian (x,y,z) coordinates\"\"\"\n    if x == 0.0 and y == 0.0:\n        raise ValueError(\n            \"\"\"\n            x=0 and y=0 encountered; cartesian coordinate along z-axis results in\n            indeterminate expression for (r,theta,phi).\n            \"\"\"\n        )\n    r = sqrt(x ** 2 + y ** 2 + z ** 2)\n    theta = acos(z / r)\n    phi = atan2(y, x)\n\n    return r, theta, phi\n\n\n# endregion\n\n\n# region Velocity Coordinate Conversions\ndef get_velocity_spherical_from_cartesian(v, vdot):\n    \"\"\"\n    Get velocity vector in spherical coordinates (rdot, thetadot, phidot)\n    from cartesian coordinates.\n    \"\"\"\n    x, y, z = v\n    xdot, ydot, zdot = vdot\n\n    if x == 0 and y == 0:\n        raise ValueError(\"Position can't be on z-axis (x==0 and y==0).\")\n\n    rdot = (x * xdot + y * ydot + z * zdot) / (sqrt(x ** 2 + y ** 2 + z ** 2))\n\n    thetadot = ((x * xdot + y * ydot) * z - (x ** 2 + y ** 2) * zdot) / (\n        (x ** 2 + y ** 2 + z ** 2) * sqrt(x ** 2 + y ** 2)\n    )\n\n    phidot = (x * ydot - xdot * y) / (x ** 2 + y ** 2)\n\n    return (rdot, thetadot, phidot)\n\n\ndef get_velocity_cartesian_from_spherical(v, vdot):\n    \"\"\"Get velocity vector in cartesian coordinates (x, y, z) from spherical\n    coordinates (r, theta, phi) and spherical velocity (rdot, thetadot, phidot).\n\n    Arguments:\n        v {List[float]} -- spherical position (r, theta, phi)\n        vdot {List[float]} -- spherical velocity (rdot, thetadot, phidot)\n\n    Returns:\n        Tuple[float] -- cartesian velocity (xdot, ydot, zdot)\n    \"\"\"\n    r, theta, phi = v\n    rdot, thetadot, phidot = vdot\n\n    xdot = (\n        rdot * sin(theta) * cos(phi)\n        + r * thetadot * cos(theta) * cos(phi)\n        - r * phidot * sin(theta) * sin(phi)\n    )\n\n    ydot = (\n        rdot * sin(theta) * sin(phi)\n        + r * thetadot * cos(theta) * sin(phi)\n        + r * phidot * sin(theta) * cos(phi)\n    )\n\n    zdot = rdot * cos(theta) - r * thetadot * sin(theta)\n\n    return xdot, ydot, zdot\n\n\n# endregion\n\n# region Speeds\ndef get_speed_spherical(r, theta, rdot, thetadot, phidot):\n    \"\"\"Get speed of body given in spherical coordinates.\"\"\"\n    v = sqrt(\n        rdot ** 2 + r ** 2 * thetadot ** 2 + r ** 2 * sin(theta) ** 2 * phidot ** 2\n    )\n\n    return v\n\n\ndef get_speed_cartesian(xdot, ydot, zdot):\n    \"\"\"Get speed of body given in cartesian coordinates.\"\"\"\n    v = sqrt(xdot ** 2 + ydot ** 2 + zdot ** 2)\n\n    return v\n\n\n# endregion\n\n\n# # region UNUSED AND UNTESTED\n# # ---Unit Vectors (Spherical)\n# def get_unit_r_in_cartesian(theta, phi):\n#     \"\"\"Get spherical r unit vector in cartesian coordinates\"\"\"\n#     R_hat = (\n#         sin(theta) * cos(phi) * np.array([1, 0, 0])\n#         + sin(theta) * sin(phi) * np.array([0, 1, 0])\n#         + cos(theta) * np.array([0, 0, 1])\n#     )\n\n#     return R_hat\n\n\n# def get_unit_theta_in_cartesian(theta, phi):\n#     \"\"\"Get spherical theta unit vector in cartesian coordinates\"\"\"\n#     theta_hat = (\n#         cos(theta) * cos(phi) * np.array([1, 0, 0])\n#         + cos(theta) * sin(phi) * np.array([0, 1, 0])\n#         - sin(theta) * np.array([0, 0, 1])\n#     )\n\n#     return theta_hat\n\n\n# def get_unit_phi_in_cartesian(phi):\n#     \"\"\"Get spherical phi unit vector in cartesian coordinates\"\"\"\n#     phi_hat = -sin(phi) * np.array([1, 0, 0]) + 
cos(phi) * np.array([0, 1, 0])\n\n# return phi_hat\n\n\n# endregion\n\n# if __name__ == \"__main__\":\n\n# vs = [\n# 0,\n# 45,\n# 90,\n# 179,\n# 180,\n# 181,\n# 541,\n# 901,\n# 270,\n# 359,\n# 360,\n# 361,\n# 0,\n# -45,\n# -90,\n# -179,\n# -180,\n# -181,\n# -541,\n# -901,\n# -270,\n# -359,\n# -360,\n# -361,\n# ]\n\n# vs = [x * pi / 180 for x in vs]\n\n# test = list(map(keep_phi_in_interval_npi_to_pi, vs))\n# test2 = list(map(keep_theta_in_interval_zero_to_pi, vs))\n\n# from pprint import pprint\n\n# pprint(test)\n# pprint(test2)\n" }, { "alpha_fraction": 0.5526027083396912, "alphanum_fraction": 0.5681967735290527, "avg_line_length": 31.52142906188965, "blob_id": "ac7cee32f3f9c387278972499cc960d0676a807e", "content_id": "38319392208f296e5f1e7459955be28870de8234", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4553, "license_type": "permissive", "max_line_length": 128, "num_lines": 140, "path": "/code/pyscripts/raw_rocketry.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "from orbsim.r3b_2d.simulators import run_sim\nfrom multiprocessing import Pool\nimport multiprocessing as mp\n\nimport numpy as np\nimport pygmo as pg\n\n# from orbsim.plotting import orbitplot2d, orbitplot_non_inertial\n# from numba import njit\nfrom math import pi\nfrom scipy.stats import rankdata\n\nfrom orbsim.r3b_2d.analyticals import (\n ensure_bounds,\n random_disjoint_intervals,\n collapse_intervals,\n)\n\ntau = pi * 2\nbounds = {\n \"pos\": np.array([[3.8, 5.0]]),\n \"ang\": np.array([[0, 0.02]]),\n \"burn\": np.array([[3.1, 3.15]]),\n}\n\n\n# @njit\ndef evolve(psis):\n init_sigma = 0.003\n init_alpha = 0.0003 # learningrate\n init_psis = psis\n\n iterations = 50\n # for each iteration, jitter around starting points, and move in the\n # best direction (weighted average jitter coordinates according to\n # fitness score)\n for idx in range(len(psis)):\n sigma = init_sigma\n alpha = init_alpha\n final_scores = []\n for _ in range(iterations):\n psi = psis[idx]\n noise = np.random.randn(25, 3)\n epsis = psi + sigma * noise # the point cloud around psi\n epsis = [ensure_bounds(epsi, bounds.values()) for epsi in epsis]\n\n \"\"\"calculate the reward in the cloud\"\"\"\n score, success = fitness(psi)\n print(f\"individual {idx}: {score}, {psi}\")\n e_scores = np.zeros(len(epsis))\n e_successes = np.zeros(len(epsis))\n for jdx in range(len(epsis)):\n epsi = epsis[jdx]\n e_scores[jdx], e_successes[jdx] = fitness(\n epsi\n ) # launch a simulation for each point\n ranked_scores = fitness_shape(epsis, e_scores, e_successes)\n\n step_norm = np.dot(\n -1 * ranked_scores, noise\n ) # problem is flipped from maximization to minimization here. 
(*-1)\n step = alpha * step_norm\n psis[idx] = psi + step # mutate the population/take the step\n # sigma = min(1.2, init_sigma, init_sigma * (psi_reward * 8))\n # alpha = max(0.8, min(init_alpha, init_alpha * (psi_reward * 8)))\n final_scores.append(score)\n [\n print(\n f\"psis[{idx}] went from {init_psis[idx]} to {psis[idx]} in {iterations} iterations, final score={final_scores[idx]}\"\n )\n for idx in range(len(psis))\n ]\n return psis\n\n # for i in range(iterations):\n\n # # do the jittering and selection\n # for psi in psis:\n # noise = np.random.randn(10, 3)\n # epsis = [\n # [psi_0, psi_1, psi_2]\n # for [psi_0, psi_1, psi_2] in np.expand_dims(psi, 0)\n # + sigma * noise\n # ]\n\n # reward = np.array(\n # [-launch_sim(epsi, duration=10, max_iter=1e7)[0] for epsi in epsis]\n # )\n # reward -= reward.mean()\n # reward /= reward.std()\n # step_norm = np.dot(reward, noise) # F, in the literature\n # step = alpha * step_norm\n # print(\"new individual = {str(psi+step)}\")\n # psi += step # mutate the population/take the step\n # return 0\n\n\n# @njit\ndef fitness_shape(psis, scores, successes):\n for idx in range(len(scores)):\n if successes[idx]:\n scores[idx] = scores[idx] + psis[idx][2] # just add burnDv\n else:\n scores[idx] = scores[idx] + 20 + psis[idx][2] # punish for not hitting\n\n ranked_scores = rankdata(scores)\n mean = ranked_scores.mean()\n ranked_scores -= mean\n ranked_scores /= ranked_scores.std()\n ranked_scores = [min(0, rscore) for rscore in ranked_scores]\n # neutralize negative influence by making very poor fitnesses equal to the mean fitness\n return np.array(ranked_scores).transpose()\n\n\n# @njit\ndef fitness(psi):\n score, success, _ = run_sim(psi, duration=200, max_iter=1e7)\n return [score, success]\n\n\ndef initialize_psis(n, bounds):\n psis = [[random_disjoint_intervals(bound) for bound in bounds] for _ in range(n)]\n return psis\n\n\nif __name__ == \"__main__\":\n mag = mp.cpu_count()\n nIndividuals = mag\n p = Pool(mag)\n nBuckets = mag\n # evolve(np.array(initialize_psis(nIndividuals, bounds.values())))\n result = p.map(\n evolve,\n [\n np.array(initialize_psis(int(nIndividuals / nBuckets), bounds.values()))\n for _ in range(mp.cpu_count())\n ],\n )\n print(result)\n # print(p.starmap(evolve, [(pop, 2), (pop, 4), (pop, 6), (pop, 8)]))\n" }, { "alpha_fraction": 0.438658207654953, "alphanum_fraction": 0.46270233392715454, "avg_line_length": 27.325580596923828, "blob_id": "40caf1a239ba3584fca70e5c69619f8426f67ba2", "content_id": "834823f57671a2c37cd73ff6860724e3ac86a5b6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8526, "license_type": "permissive", "max_line_length": 146, "num_lines": 301, "path": "/code/r3b_bsc/search.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nBrute-force search Module for reduced 3-body solver\n===================================================\n\nFunctions:\n\nWe assume **TODO FILL OUT HERE!\n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport sys\nimport time\nfrom math import ceil\nfrom multiprocessing import Pool\n\nimport numpy as np\n\nfrom orbsim import DAY\nfrom orbsim.r3b_2d import (\n EARTH_POSITION_X,\n LEO_RADIUS_NONDIM,\n LEO_VELOCITY,\n UNIT_TIME,\n UNIT_VELOCITY,\n)\n\nfrom .symplectic import symplectic\n\n\ndef print_search_results(stat, pos, ang, burn, x0, y0, p0_x, p0_y, dv, toa):\n print(\n \"# --------------------------------------------------------------------------\"\n )\n print(\"duration = 
%i/unit_time\" % (max(1, int(ceil(toa * UNIT_TIME)))))\n print(\"pos = %.15lf\" % (pos))\n print(\"ang = %.15lf\" % (ang))\n print(\"burn = %.15lf/unit_velocity\" % (burn * UNIT_VELOCITY))\n print(\"x0 = %.15lf\" % (x0))\n print(\"y0 = %.15lf\" % (y0))\n print(\"p0_x = %.15lf\" % (p0_x))\n print(\"p0_y = %.15lf\" % (p0_y))\n print(\n \"# --------------------------------------------------------------------------\"\n )\n print(\"# dV(earth-escape) = %f km/s\" % (abs(burn) * UNIT_VELOCITY))\n if stat > 0 and stat < 100:\n print(\"# Min moon distance= %f\" % (stat))\n elif stat < 0:\n print(\"# dV(moon-capture) = %f km/s\" % (dv * UNIT_VELOCITY))\n print(\"# dV(total) = %f km/s\" % ((abs(burn) + dv) * UNIT_VELOCITY))\n print(\"# Flight-time = %f days\" % (toa * UNIT_TIME))\n else:\n print(\"# Crashed on earth!\")\n print(\n \"# --------------------------------------------------------------------------\"\n )\n sys.stdout.flush()\n\n\ndef search(thread, threads, n, duration, positions, angles, burns):\n\n # print(\"Start thread=%i\" % (thread))\n\n # Initialize arrays\n xs = np.zeros(n)\n ys = np.zeros(n)\n p_xs = np.zeros(n)\n p_ys = np.zeros(n)\n step_errors = np.zeros(n)\n h_list = np.zeros(n)\n info = np.zeros(2)\n\n # Search for orbits\n trials = len(positions) * len(angles) * len(burns)\n ld1 = len(angles) * len(burns)\n ld2 = len(burns)\n trial = 0\n hit_earth = 0\n hit_moon = 0\n best_status = 1e9\n progress = -1\n i = thread\n while i < trials:\n\n # One-to-one mapping of i -> (pos_i,ang_i,burn_i)\n pos_i = i // ld1\n ang_i = (i - pos_i * ld1) // ld2\n burn_i = i - pos_i * ld1 - ang_i * ld2\n i += threads\n\n # Find launch setup\n pos = positions[pos_i]\n ang = angles[ang_i]\n burn = burns[burn_i]\n\n # Calculate initial conditions\n x0 = np.cos(pos) * LEO_RADIUS_NONDIM\n y0 = np.sin(pos) * LEO_RADIUS_NONDIM\n v_norm_x = -y0 / LEO_RADIUS_NONDIM\n v_y_norm = x0 / LEO_RADIUS_NONDIM\n v_x = (LEO_VELOCITY / UNIT_VELOCITY) * v_norm_x\n v_y = (LEO_VELOCITY / UNIT_VELOCITY) * v_y_norm\n x0 += EARTH_POSITION_X\n bx = np.cos(ang) * v_norm_x - np.sin(ang) * v_y_norm\n by = np.sin(ang) * v_norm_x + np.cos(ang) * v_y_norm\n p0_x = (\n v_x + burn * bx - y0\n ) # Sign of burn decides rotational direction of launch\n p0_y = v_y + burn * by + x0\n\n # Call symplectic integration\n # status > 0 : Closest distance to moon achieved\n # status < 0 : Hit the moon using status=dV(moon)-10000 to get into orbit\n # status == 100 : Collided with earth\n # if thread == 1:\n # print(n,duration,x0,y0,p0_x,p0_y)\n status = symplectic(\n n,\n duration,\n x0,\n y0,\n p0_x,\n p0_y,\n xs,\n ys,\n p_xs,\n p_ys,\n step_errors,\n h_list,\n info,\n )\n if status == 100:\n hit_earth += 1\n if status < 0:\n hit_moon += 1\n if status < best_status:\n best_status = status\n best_pos = pos\n best_ang = ang\n best_burn = burn\n best_x0 = x0\n best_y0 = y0\n best_p0_x = p0_x\n best_p0_y = p0_y\n best_dv = info[0]\n best_toa = info[1]\n\n # Show progress\n if thread == 0: # only thread 0\n if (100 * trial / (1 + trials // threads)) // 10 > progress:\n progress = (100 * trial / (1 + trials // threads)) // 10\n print(progress * 10, end=\"% \")\n sys.stdout.flush()\n trial += 1\n # if thread == 13:\n # print(\"thread=%i status=%f best_status=%f trial=%i(%i) pos=%f ang=%f burn=%f\" % (thread,status,best_status,trial,trials,pos,ang,burn))\n\n # print(\"End thread=%i\" % (thread))\n\n return (\n best_status,\n best_pos,\n best_ang,\n best_burn,\n best_x0,\n best_y0,\n best_p0_x,\n best_p0_y,\n best_dv,\n best_toa,\n hit_earth,\n 
hit_moon,\n )\n\n\ndef search_worker(args):\n return search(args[0], args[1], args[2], args[3], args[4], args[5], args[6])\n\n\ndef search_mt(\n threads,\n n,\n duration,\n num_pos,\n num_ang,\n num_burn,\n pos_low,\n pos_high,\n ang_low,\n ang_high,\n burn_low,\n burn_high,\n):\n\n # Time search\n runtime = time.time()\n\n # Set search space\n positions = np.linspace(pos_low, pos_high, num_pos)\n angles = np.linspace(ang_low, ang_high, num_ang)\n burns = np.linspace(burn_low, burn_high, num_burn)\n if num_pos == 1:\n positions[0] = (pos_high + pos_low) / 2.0\n if num_ang == 1:\n angles[0] = (ang_high + ang_low) / 2.0\n if num_burn == 1:\n burns[0] = (burn_high + burn_low) / 2.0\n if num_burn == 2:\n burns[0] = burn_low\n burns[1] = burn_high\n trials = num_pos * num_ang * num_burn\n print(positions)\n print(angles)\n print(burns * UNIT_VELOCITY)\n\n # Set threads\n threads = min(threads, num_pos)\n\n # Do multi-threaded search\n print(\n \"# --------------------------------------------------------------------------\"\n )\n print(\"# Threads: %6i\" % (threads))\n print(\"# Trials: %6i (\" % (trials), end=\"\")\n if threads == 1:\n # Single thread\n best_status, best_pos, best_ang, best_burn, best_x0, best_y0, best_p0_x, best_p0_y, best_dv, best_toa, hit_earth, hit_moon = search(\n 0, num_pos, n, duration, positions, angles, burns\n )\n else:\n # Multi-threading\n chunk = num_pos / threads\n args = [\n [i, threads, n, duration, positions, angles, burns] for i in range(threads)\n ]\n pool = Pool()\n result = pool.map(search_worker, args)\n pool.close()\n pool.join()\n\n # Reduce results from all threads\n best_status = 1e9\n hit_earth = 0\n hit_moon = 0\n for i in range(threads):\n status = result[i][0]\n if status < best_status:\n best_status = status\n best_pos = result[i][1]\n best_ang = result[i][2]\n best_burn = result[i][3]\n best_x0 = result[i][4]\n best_y0 = result[i][5]\n best_p0_x = result[i][6]\n best_p0_y = result[i][7]\n best_dv = result[i][8]\n best_toa = result[i][9]\n hit_earth += result[i][10]\n hit_moon += result[i][11]\n\n print(\"100%)\")\n print(\n \"# No interception: %6i (%i%%)\"\n % (\n trials - hit_earth - hit_moon,\n 100 * (trials - hit_earth - hit_moon) / trials,\n )\n )\n print(\"# Crashed on earth: %6i (%i%%)\" % (hit_earth, 100 * hit_earth / trials))\n print(\"# Hit moon: %6i (%i%%)\" % (hit_moon, 100 * hit_moon / trials))\n runtime = time.time() - runtime\n print(\"# Runtime: %6.2fs\" % (runtime))\n if best_status < 100:\n print_search_results(\n best_status,\n best_pos,\n best_ang,\n best_burn,\n best_x0,\n best_y0,\n best_p0_x,\n best_p0_y,\n best_dv,\n best_toa,\n )\n return (\n best_status,\n best_pos,\n best_ang,\n best_burn,\n best_x0,\n best_y0,\n best_p0_x,\n best_p0_y,\n best_dv,\n best_toa,\n )\n else:\n return best_status, 0, 0, 0, 0, 0, 0, 0, 0, 0\n" }, { "alpha_fraction": 0.6226763129234314, "alphanum_fraction": 0.642879068851471, "avg_line_length": 33.71466827392578, "blob_id": "d18affae590a455eb4faa009a601010a0af35775", "content_id": "a944916d97b29652f2735fb77a6fdc3b4aed47d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13023, "license_type": "permissive", "max_line_length": 88, "num_lines": 375, "path": "/code/orbsim/r4b_3d/initial_conditions.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"Functions that calculate various useful initial conditions for the R4B simulator.\"\"\"\n\nimport logging\nfrom math import degrees, radians\n\nimport numpy 
as np\n\nfrom orbsim import EARTH_RADIUS, SUN_RADIUS\nfrom orbsim.r4b_3d import UNIT_LENGTH, UNIT_TIME, UNIT_VELOCITY\nfrom orbsim.r4b_3d.coordinate_system import (\n get_position_spherical_from_cartesian,\n get_speed_spherical,\n get_velocity_spherical_from_cartesian,\n)\nfrom orbsim.r4b_3d.ephemerides import get_ephemerides, get_ephemerides_on_day\nfrom orbsim.r4b_3d.equations_of_physics import get_circular_orbit_speed\n\nfrom orbsim.r4b_3d.equations_of_motion import get_B_R, get_B_theta, get_B_phi\n\n\ndef get_leo_position_and_velocity(day=0, altitude=160, max_year=\"2020\"):\n \"\"\"Calculate direction of initial velocity vector.\n Assumes ephemerides are given with 1 day interval. With a series of cross products,\n calculate a LEO position perpendicular and velocity parallel to Earth's velocity\n vector in same direction and plane as Earth.\n\n Arguments:\n ephemerides {[dict of pandas dataframe]} -- ephemerides table.\n day {[int]} -- day (day=0 at 2019-01-01 00:00:00)\n\n Keyword Arguments:\n orientation {str} -- Wanted LEO orbit orientation (default: {'ecliptic'})\n\n Returns:\n List[List[int]] -- Initial LEO position vector, spherical [AU/y, rad, rad]\n initial LEO velocity vector, spherical [AU/y, rad/y, rad/y]\n \"\"\"\n\n # region --- 1 Calculate Earth velocity and speed (cartesian & spherical)\n\n # Earth velocity at day = dr/dt ≈ Δr/Δt, where Δt = 1 day\n # i.e. Earth velocity estimated by difference of position vector 1 day apart / 1 day\n ephemerides = get_ephemerides(max_year=max_year)\n eph_day = get_ephemerides_on_day(ephemerides, day)\n eph_daym1 = get_ephemerides_on_day(ephemerides, day - 1)\n\n earth_day = eph_day[\"earth\"]\n earth_daym1 = eph_daym1[\"earth\"]\n earth_diff = earth_day - earth_daym1\n\n # Get positions of Earth at {day, day-1} in spherical and cartesian coordinates\n earth_q0_spherical_AU_deg = [earth_day[\"r\"], earth_day[\"theta\"], earth_day[\"phi\"]]\n earth_q0_cartesian_AU = [earth_day[\"x\"], earth_day[\"y\"], earth_day[\"z\"]]\n\n earth_qm1_spherical_AU = [\n earth_daym1[\"r\"],\n earth_daym1[\"theta\"],\n earth_daym1[\"phi\"],\n ]\n earth_qm1_cartesian_AU = [earth_daym1[\"x\"], earth_daym1[\"y\"], earth_daym1[\"z\"]]\n\n # Average r and theta needed for speed computation in spherical coordinates\n r_average_AU = np.mean([earth_day[\"r\"], earth_daym1[\"r\"]])\n r_average_km = r_average_AU * UNIT_LENGTH\n logging.debug(f\"r_average: {r_average_AU} AU = {r_average_km} km\")\n\n theta_average_deg = np.mean([earth_day[\"theta\"], earth_daym1[\"theta\"]])\n theta_average_rad = radians(theta_average_deg)\n logging.debug(f\"theta_average: {theta_average_deg} deg = {theta_average_rad} rad\")\n\n # Earth velocity vector in spherical and cartesian coordinates and convert units\n earth_qdot0_spherical_au_day_deg = [\n earth_diff[\"r\"],\n earth_diff[\"theta\"],\n earth_diff[\"phi\"],\n ]\n\n earth_qdot0_spherical_km_s_rad = [\n earth_qdot0_spherical_au_day_deg[0] / (24 * 3600) * UNIT_LENGTH,\n radians(earth_qdot0_spherical_au_day_deg[1]) / (24 * 3600),\n radians(earth_qdot0_spherical_au_day_deg[2]) / (24 * 3600),\n ]\n\n earth_qdot0_cartesian_au_day = [earth_diff[\"x\"], earth_diff[\"y\"], earth_diff[\"z\"]]\n\n earth_qdot0_cartesian_km_s = [\n earth_qdot0_cartesian_au_day[0] / (24 * 3600) * UNIT_LENGTH,\n earth_qdot0_cartesian_au_day[1] / (24 * 3600) * UNIT_LENGTH,\n earth_qdot0_cartesian_au_day[2] / (24 * 3600) * UNIT_LENGTH,\n ]\n\n # Speeds\n earth_qdot0_spherical_km_s_rad_speed = get_speed_spherical(\n r_average_km,\n theta_average_rad,\n 
earth_qdot0_spherical_km_s_rad[0], # rdot\n earth_qdot0_spherical_km_s_rad[1], # thetadot\n earth_qdot0_spherical_km_s_rad[2], # phidot\n )\n\n earth_qdot0_cartesian_au_day_speed = np.linalg.norm(earth_qdot0_cartesian_au_day)\n\n earth_qdot0_cartesian_km_s_speed = np.linalg.norm(earth_qdot0_cartesian_km_s)\n\n # Logs\n logging.debug(\n f\"Earth initial position at day {day} (cartesian, AU): \"\n f\"{earth_q0_cartesian_AU}\"\n )\n logging.debug(\n f\"Earth initial position at day {day-1} (cartesian, AU): \"\n f\"{earth_qm1_cartesian_AU}\"\n )\n\n logging.debug(\n f\"Earth initial position at day {day} (spherical, AU & deg): \"\n f\"{earth_q0_spherical_AU_deg}\"\n )\n logging.debug(\n f\"Earth initial position at day {day-1} (spherical, AU & deg): \"\n f\"{earth_qm1_spherical_AU}\"\n )\n\n logging.debug(\n f\"Earth initial velocity (spherical, AU/d & deg/d): \"\n f\"{earth_qdot0_spherical_au_day_deg}\"\n )\n logging.debug(\n f\"Earth initial velocity (spherical, km/s & rad/s): \"\n f\"{earth_qdot0_spherical_km_s_rad} \"\n f\"(speed: {earth_qdot0_spherical_km_s_rad_speed})\"\n )\n\n logging.debug(\n f\"Earth initial velocity (cartesian, AU/d): {earth_qdot0_cartesian_au_day}\"\n f\" (speed: {earth_qdot0_cartesian_au_day_speed})\"\n )\n logging.debug(\n f\"Earth initial velocity (cartesian, km/s): {earth_qdot0_cartesian_km_s}\"\n f\" (speed: {earth_qdot0_cartesian_km_s_speed})\"\n )\n\n # endregion\n\n # region --- 2 Earth plane vector\n\n earth_orbital_plane = np.cross(earth_q0_cartesian_AU, earth_qdot0_cartesian_au_day)\n earth_orbital_plane /= np.linalg.norm(earth_orbital_plane)\n\n logging.debug(f\"Ecliptic plane vector: {earth_orbital_plane}\")\n\n # endregion\n\n # region --- 3 Spacecraft initial position\n\n # Spacecraft geocentric position: perpendicular to earth velocity pointing outwards\n # (i.e. 
chosen such that it's the one pointing outwards from elliptical orbit)\n # (note this means spacecraft speed != earth speed (helio) + spacecraft speed (geo))\n q0_geocentric_cartesian_unit = np.cross(\n earth_qdot0_cartesian_km_s, earth_orbital_plane\n )\n q0_geocentric_cartesian_unit /= np.linalg.norm(q0_geocentric_cartesian_unit)\n\n q0_geocentric_cartesian_km = q0_geocentric_cartesian_unit * (\n EARTH_RADIUS + altitude\n )\n q0_geocentric_cartesian_AU = q0_geocentric_cartesian_km / UNIT_LENGTH\n\n q0_cartesian_AU = earth_q0_cartesian_AU + q0_geocentric_cartesian_AU\n q0_cartesian_km = q0_cartesian_AU * UNIT_LENGTH\n\n q0_spherical_AU_rad = list(get_position_spherical_from_cartesian(*q0_cartesian_AU))\n\n q0_spherical_AU_deg = list(q0_spherical_AU_rad) # copy, not reference\n q0_spherical_AU_deg[1] = degrees(q0_spherical_AU_deg[1])\n q0_spherical_AU_deg[2] = degrees(q0_spherical_AU_deg[2])\n\n logging.debug(\n f\"Spacecraft initial position unit vector (geocentric, cartesian): \"\n f\"{q0_geocentric_cartesian_unit}\"\n )\n\n logging.debug(\n f\"Spacecraft initial position (geocentric, cartesian, km): \"\n f\"{q0_geocentric_cartesian_km}\"\n f\" (distance from Earth center: {np.linalg.norm(q0_geocentric_cartesian_km)})\"\n )\n\n logging.debug(\n f\"Spacecraft initial position (geocentric, cartesian, AU): \"\n f\"{q0_geocentric_cartesian_AU}\"\n f\" (distance from Earth center: {np.linalg.norm(q0_geocentric_cartesian_AU)})\"\n )\n logging.debug(\n f\"Spacecraft initial position (heliocentric, cartesian, AU): \"\n f\"{q0_cartesian_AU}\"\n )\n logging.debug(\n f\"Spacecraft initial position (heliocentric, cartesian, km): \"\n f\"{q0_cartesian_km}\"\n )\n\n logging.debug(\n f\"Spacecraft initial position (heliocentric, spherical, AU & rad): \"\n f\"{q0_spherical_AU_rad}\"\n )\n logging.debug(\n f\"Spacecraft initial position (heliocentric, spherical, AU & deg): \"\n f\"{q0_spherical_AU_deg}\"\n )\n\n # endregion\n\n # region --- 4 Spacecraft initial velocity\n # Spacecraft velocity = Earth velocity + leo speed (same direction as Earth)\n\n leo_speed = get_circular_orbit_speed(\"Earth\", altitude)\n\n qdot0_cartesian_unit = list(earth_qdot0_cartesian_km_s)\n qdot0_cartesian_unit /= np.linalg.norm(qdot0_cartesian_unit)\n\n qdot0_cartesian_km_s = earth_qdot0_cartesian_km_s + qdot0_cartesian_unit * leo_speed\n qdot0_cartesian_km_s_speed = np.linalg.norm(qdot0_cartesian_km_s)\n\n # Get spherical velocity vector from cartesian velocity vector\n qdot0_spherical_km_s_rad = get_velocity_spherical_from_cartesian(\n q0_cartesian_km, qdot0_cartesian_km_s\n )\n\n qdot0_spherical_km_s_rad_speed = get_speed_spherical(\n r_average_km,\n theta_average_rad,\n qdot0_spherical_km_s_rad[0], # rdot\n qdot0_spherical_km_s_rad[1], # thetadot\n qdot0_spherical_km_s_rad[2], # phidot\n )\n\n qdot0_spherical_AU_rad_y = [\n qdot0_spherical_km_s_rad[0] / UNIT_VELOCITY, # AU/y\n qdot0_spherical_km_s_rad[1] * UNIT_TIME, # rad/y\n qdot0_spherical_km_s_rad[2] * UNIT_TIME, # rad/y\n ]\n\n qdot0_spherical_AU_rad_year_speed = get_speed_spherical(\n r_average_AU,\n theta_average_rad,\n qdot0_spherical_AU_rad_y[0], # rdot\n qdot0_spherical_AU_rad_y[1], # thetadot\n qdot0_spherical_AU_rad_y[2], # phidot\n )\n\n logging.debug(\n f\"Spacecraft initial velocity unit vector (cartesian, km/s): \"\n f\"{qdot0_cartesian_unit}\"\n )\n logging.debug(\n f\"Spacecraft initial velocity vector (cartesian, km/s): {qdot0_cartesian_km_s}\"\n f\" (speed: {qdot0_cartesian_km_s_speed})\"\n )\n\n logging.debug(\n f\"Spacecraft initial velocity vector 
(spherical, km/s & rad/s): \"\n f\"{qdot0_spherical_km_s_rad}\"\n f\" (speed: {qdot0_spherical_km_s_rad_speed})\"\n )\n\n logging.debug(\n f\"Spacecraft initial velocity vector (spherical, AU/y & rad/y): \"\n f\"{qdot0_spherical_AU_rad_y}\"\n f\" (speed: {qdot0_spherical_AU_rad_year_speed})\"\n )\n\n # endregion\n\n # region --- 5 Calculate B0 from Q0, Qdot0\n\n # FINAL OUTPUT: Initial coordinates (Q)\n Q0 = q0_spherical_AU_rad\n\n # FINAL OUTPUT: Initial momenta per mass (B)\n R, theta, _ = Q0\n Rdot, thetadot, phidot = qdot0_spherical_AU_rad_y\n\n B_R = get_B_R(Rdot)\n B_theta = get_B_theta(R, thetadot)\n B_phi = get_B_phi(R, theta, phidot)\n\n B0 = [B_R, B_theta, B_phi]\n\n # Info log output\n logging.info(\n f\"Spacecraft initial position vector, Q0 (spherical, AU & rad): \" f\"{Q0}\"\n )\n logging.info(\n f\"Spacecraft initial momentum per mass vector, B0 (AU/y & rad/y): {B0}\"\n )\n\n # endregion\n\n return Q0, B0\n\n\ndef get_circular_sun_orbit_position_and_velocity(altitude=UNIT_LENGTH - SUN_RADIUS):\n \"\"\"Get orbit speed for circular orbit around the at altitude.\n\n Keyword Arguments:\n altitude {float} -- Altitude above Sun surface (km)\n (default: {UNIT_LENGTH-SUN_RADIUS})\n\n Returns:\n float -- Required orbit speed for circular orbit around sun (km/s)\n \"\"\"\n\n sun_orbital_speed = get_circular_orbit_speed(body=\"Sun\", altitude=altitude)\n\n logging.debug(\n f\"Required orbital speed at altitude {altitude:.2f} km above Sun: \"\n f\"{sun_orbital_speed} km/s\"\n )\n\n # Position\n q0_cartesian_AU = [1, 0, 0]\n q0_cartesian_km = [q * UNIT_LENGTH for q in q0_cartesian_AU]\n q0_spherical_AU_rad = get_position_spherical_from_cartesian(*q0_cartesian_AU)\n\n logging.debug(f\"Position (heliocentric, cartesian, AU: {q0_cartesian_AU}\")\n logging.debug(f\"Position (heliocentric, cartesian, km: {q0_cartesian_km}\")\n logging.debug(f\"Position (heliocentric, spherical, AU & rad: {q0_spherical_AU_rad}\")\n\n # Velocity\n qdot0_cartesian_km_s = [0, sun_orbital_speed, 0]\n qdot0_cartesian_AU_y = [v_i / UNIT_VELOCITY for v_i in qdot0_cartesian_km_s]\n qdot0_spherical_AU_rad_y = get_velocity_spherical_from_cartesian(\n q0_cartesian_AU, qdot0_cartesian_AU_y\n )\n\n logging.debug(f\"Velocity (heliocentric, cartesian, km/s: {qdot0_cartesian_km_s}\")\n logging.debug(f\"Velocity (heliocentric, cartesian, AU/y: {qdot0_cartesian_AU_y}\")\n logging.debug(\n f\"Velocity (heliocentric, spherical, AU/y & rad/y: {qdot0_spherical_AU_rad_y}\"\n )\n\n # FINAL OUTPUT: Initial coordinates (Q)\n Q0 = q0_spherical_AU_rad\n\n # FINAL OUTPUT: Initial momenta per mass (B)\n R, theta, _ = Q0\n Rdot, thetadot, phidot = qdot0_spherical_AU_rad_y\n\n B_R = get_B_R(Rdot)\n B_theta = get_B_theta(R, thetadot)\n B_phi = get_B_phi(R, theta, phidot)\n\n B0 = [B_R, B_theta, B_phi]\n\n # Info log output\n logging.info(\n f\"Spacecraft initial position vector, Q0 (spherical, AU & rad): \" f\"{Q0}\"\n )\n logging.info(\n f\"Spacecraft initial momentum per mass vector, B0 (AU/y & rad/y): {B0}\"\n )\n\n return Q0, B0\n\n\nif __name__ == \"__main__\":\n\n from orbsim.r4b_3d.logging import logging_setup\n\n logging_setup()\n\n # # get_leo_position_and_velocity()\n\n get_circular_sun_orbit_position_and_velocity()\n" }, { "alpha_fraction": 0.6286472082138062, "alphanum_fraction": 0.6350574493408203, "avg_line_length": 28.180644989013672, "blob_id": "531ab85c62acd1e6cde66e013cf6b911bfccdc89", "content_id": "f22add397253a5dfce5faa808cfe30a35e54f78a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 4524, "license_type": "permissive", "max_line_length": 88, "num_lines": 155, "path": "/code/tests/r4b_3d/test_ephemerides.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nPytest module of corresponding python file without \"test_\" in the name.\n\"\"\"\nimport json\nimport os\n\nimport pytest\n\nfrom orbsim.r4b_3d.ephemerides import (\n get_ephemerides,\n get_ephemerides_on_day,\n get_coordinates_on_day_rad,\n)\n\n\ndef import_expected(function: str):\n \"\"\"import data from json file created by mathematica script and import as\n 'ground truth'\n \"\"\"\n\n math_json_filename = os.path.basename(__file__).split(\".\")[0] + \".json\"\n\n with open(\n os.path.dirname(os.path.realpath(__file__)) + \"/\" + math_json_filename\n ) as file:\n data = json.load(file)\n\n return data[function]\n\n\ndef filter_expected(function_name: str, data):\n xsuccess = []\n xfail = []\n\n for test_input, test_output in data:\n\n input_str = str(test_input)\n\n if isinstance(test_output, str):\n # Expect fail --> Make pytest.param marked with xfai\n # e.g. pytest.param(\"6*9\", 42, marks=pytest.mark.xfail)\n xfail.append(\n pytest.param(\n f\"{function_name}(ephemerides,{input_str})\",\n None,\n marks=pytest.mark.xfail(raises=ValueError),\n )\n )\n\n else:\n # Expect success -> make tuple list (\"function(input)\", test_output)\n eph = get_ephemerides_on_day(get_ephemerides(), test_input)\n sun = list(eph[\"sun\"])\n earth = list(eph[\"earth\"])\n mars = list(eph[\"mars\"])\n\n del sun[2]\n del earth[2]\n del mars[2]\n\n xsuccess.append(([sun, earth, mars], test_output))\n\n return xsuccess, xfail\n\n\ndef process_ephemerides_for_coordinate_function(function_name: str, data):\n coords = []\n\n for test_input, expected in data:\n # Expect success -> make tuple list (\"function(input)\", test_output)\n output = list(\n get_coordinates_on_day_rad(\n get_ephemerides_on_day(get_ephemerides(), test_input)\n )\n )\n\n coords.append((output, expected))\n\n return coords\n\n\ntest_data_list = import_expected(\"get_ephemerides_on_day\")\nxsuccess, xfail = filter_expected(\"get_ephemerides_on_day\", test_data_list)\n\ntest_data_list2 = import_expected(\"get_coordinates_on_day_rad\")\ncoords = process_ephemerides_for_coordinate_function(\n \"get_coordinates_on_day_rad\", test_data_list2\n)\n\n\[email protected](\"test_input, expected\", xfail)\ndef test_invalid_days(test_input, expected):\n \"\"\"\n Tests of get_ephemerides_on_day\"\"\"\n\n # 1. We have to use pytest.approx since there is a difference on the rounding of the\n # very last decimal in reading in the .csv between Python and Mathematica.\n\n # 2. We have to iterate over the input/output with for loop since pytest.approx does\n # not support nested structures.\n\n ephemerides = get_ephemerides() # pylint: disable=W0612\n assert eval(test_input) == expected # pylint: disable=W0123\n\n\[email protected](\"test_input, expected\", xsuccess)\ndef test_valid_days(test_input, expected):\n \"\"\"\n Tests of get_ephemerides_on_day\"\"\"\n\n # 1. We have to use pytest.approx since there is a difference on the rounding of the\n # very last decimal in reading in the .csv between Python and Mathematica.\n\n # 2. 
We have to iterate over the input/output with for loop since pytest.approx does\n # not support nested structures.\n\n for i, test_input_part in enumerate(test_input):\n assert test_input_part == pytest.approx(expected[i])\n\n\[email protected](strict=True, raises=ValueError)\ndef test_invalid_planets():\n \"\"\"Tests of get_ephemerides (INVALID PLANETS)\"\"\"\n\n get_ephemerides(planets=(\"earth\", \"mercury\"))\n\n\[email protected](raises=ValueError)\ndef test_invalid_end_year():\n \"\"\"Tests of get_ephemerides (INVALID YEARS)\"\"\"\n\n get_ephemerides(end_year=\"2042\")\n\n\[email protected](\"test_input, expected\", coords)\ndef test_coords_only_valid(test_input, expected):\n \"\"\"\n Tests of get_coordinates_on_day_rad (ONLY VALID),\n validity ensured by previous function\n \"\"\"\n\n for i, test_input_part in enumerate(test_input):\n assert test_input_part == pytest.approx(expected[i])\n\n\n# if __name__ == \"__main__\":\n\n# # imported_data = import_expected(\"get_ephemerides_on_day\")\n# # xs, xf = filter_expected(\"get_ephemerides_on_day\", imported_data)\n\n# coords = process_ephemerides_for_coordinate_function(\n# \"get_coordinates_on_day_rad\", test_data_list\n# )\n\n# x = 2\n\n" }, { "alpha_fraction": 0.6085308194160461, "alphanum_fraction": 0.6085308194160461, "avg_line_length": 22.44444465637207, "blob_id": "7f88302287fd16eeb76ef14772582fccf5ee6f5b", "content_id": "67b08178d345a61e44eb6a61f5cfa4735f6e7088", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1055, "license_type": "permissive", "max_line_length": 83, "num_lines": 45, "path": "/code/orbsim/r4b_3d/logging.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"All logging setup related functions.\n\"\"\"\n\n\nimport logging\n\n\ndef logging_setup(level=\"debug\"):\n \"\"\"Setup logging at specified level.\n\n Keyword Arguments:\n level {str} -- debug level (default: {debug})\n \"\"\"\n\n if level == \"debug\":\n level = logging.DEBUG\n elif level == \"info\":\n level = logging.INFO\n elif level == \"warning\":\n level = logging.WARNING\n elif level == \"error\":\n level = logging.ERROR\n elif level == \"critical\":\n level = logging.CRITICAL\n else:\n raise ValueError(\n \"Not valid debug level. 
Options: debug, info, warning, error, critical\"\n        )\n\n    logger = logging.getLogger()\n    logger.setLevel(level)\n\n    formatter = logging.Formatter(\n        \"%(asctime)s - %(levelname)s (%(funcName)s): %(message)s\"\n    )\n\n    fh = logging.FileHandler(\"logs/log.txt\")\n    fh.setLevel(level)\n    fh.setFormatter(formatter)\n    logger.addHandler(fh)\n\n    ch = logging.StreamHandler()\n    ch.setLevel(level)\n    ch.setFormatter(formatter)\n    logger.addHandler(ch)\n" }, { "alpha_fraction": 0.5933842062950134, "alphanum_fraction": 0.6208651661872864, "avg_line_length": 24.860759735107422, "blob_id": "62b7dd6b8c8a31a8fa0be9b41f728bb5fdb9df87", "content_id": "52bfaa274b8f4241e5d11fbfc700305b81c6e336", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1965, "license_type": "permissive", "max_line_length": 86, "num_lines": 79, "path": "/code/pyscripts/golf_course.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\nfrom orbsim.r3b_2d.simulators import run_sim\n\nfrom multiprocessing import Pool\nimport multiprocessing as mp\nfrom numba import njit\nimport datetime\n\nimport matplotlib as mpl\n\nmpl.use(\"Agg\")\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import Normalize\nimport numpy as np\nfrom math import pi, log\n\ntau = 2 * pi\n\nfilename = \"golf_course_extrazoom_s300\"\n\n\n@njit\ndef run_with_scaled_score(psi):\n    score, success, _ = run_sim(psi, duration=200, max_iter=1e7)\n    if not success:\n        score += 1\n    score *= 10\n    score += psi[2]\n    score = log(score)\n    return score, success\n\n\n@njit\ndef golfcourse_row(pos, burns):\n    result = [run_with_scaled_score([pos, 0.023901745288554, burn]) for burn in burns]\n    return result\n\n\nif __name__ == \"__main__\":\n    sz = 300\n    lbp, ubp = [4.8, 5.0]\n    lbb, ubb = [3.1, 3.11]\n    p = Pool(mp.cpu_count())\n    poss = np.linspace(lbp, ubp, sz)\n    burns = np.linspace(lbb, ubb, sz)\n    result = np.array(\n        p.starmap(golfcourse_row, [(poss[idx], burns) for idx in range(sz)])\n    )\n\n    cmap = plt.cm.viridis\n    scores, successes = result.transpose(2, 0, 1)\n\n    with open(f\"{filename}.txt\", \"w\") as matfile:\n        smatrix = scores.reshape(sz, sz)\n        np.savetxt(matfile, smatrix, fmt=\"%.4f\")\n\n    greys = np.empty(scores.shape + (3,), dtype=np.uint8)\n    greys.fill(70)\n    colors = Normalize(min(scores.flatten()), max(scores.flatten()))(scores)\n    colors = cmap(colors)\n    alphas = [[1 if succ else .4 for succ in alpharow] for alpharow in successes]\n\n    colors[..., -1] = alphas\n\n    fig, ax = plt.subplots()\n    plt.imshow(greys)\n    plt.imshow(\n        colors,\n        vmin=min(scores.flatten()),\n        vmax=max(scores.flatten()),\n        extent=[lbb, ubb, lbp, ubp],\n        interpolation=\"none\",\n        origin='lower'\n    )\n    plt.colorbar()\n    ax.set_xlabel(\"burnDv\")\n    ax.set_ylabel(\"position\")\n    ax.set_aspect((ubb - lbb) / (ubp - lbp))\n\n    plt.savefig(f\"{filename}.png\")\n" }, { "alpha_fraction": 0.4108254313468933, "alphanum_fraction": 0.6600812077522278, "avg_line_length": 40.9886360168457, "blob_id": "cdab1cbed39ca0c9c38c5aaa30557d8cbbcd5993", "content_id": "40fa6f8a369fd582db3a15b1cc3869e0dba88ee4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3695, "license_type": "permissive", "max_line_length": 149, "num_lines": 88, "path": "/docker/examples/_ex7.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "def run_example7(solver=\"snopt7\"):\n    \"\"\"\n    This example demonstrates the indirect method (cartesian) on an orbit-to-orbit scenario.\n    The orbits are those of Earth and Mars.\n    \"\"\"\n    import pykep as pk\n    import pygmo as pg\n    import numpy as np\n    from matplotlib import pyplot as plt\n    from pykep.examples import add_gradient, algo_factory\n\n    # Some pre-computed solutions (obtained by spending some iterations and\n    # restarts from random initial guesses)\n    z_mass_optimal = [397.88267909228767, 2.0719343674552215, 2.7313941033119407, 11.882803539732214, 10.567639551625298,\n                      0.50803671389927796, -11.056641527923768, 12.176151455434058, 4.1269457596809245, 3.4434953247725324]\n    z_quadratic_control = [381.32031472240106, 1.0102363292172423, 1.8134352964367946, 19.522442569527868, -\n                           6.7894353762521105, -3.3749783899165928, 7.0438655057343054, 19.923912672512174, 0.93896446800741751, 3.5483645070393743]\n    z_quadratic_control2 = [459.51623108767666, -1.616057488705803, 0.33049652475302532, -17.735981532357027, -\n                            3.2374905349904912, 2.2249621531880934, 2.9550456430212937, -20.226761256676323, -2.9684113654904061, 3.1471248891703905]\n    z_quadratic_control3 = [519.45371103815569, 0.39617485433378341, 2.7008977766929818, 7.9136210333255468, -\n                            11.03747486077437, -3.0776988186969136, 9.1796869310249747, 6.5013311040515687, -0.2054349910826633, 3.0084671211666865]\n    # A random initial solution\n    z_random = np.hstack(\n        [[np.random.uniform(100, 700)], 2 * np.random.randn(9)])\n\n    # We use an initial guess within 10% of a known optimum; you can experiment with what happens\n    # with a different choice\n    z = z_quadratic_control + z_quadratic_control * np.random.randn(10) * 0.1\n    #z = z_random\n\n    # 1 - Algorithm\n    algo = algo_factory(solver)\n\n    # 2 - Problem. We define a minimum quadratic control problem (alpha=0) with free time\n    # (hamiltonian will be forced to be 0). We provide the option for estimating the gradient numerically for\n    # algorithms that require it.\n    udp = add_gradient(pk.trajopt.indirect_or2or(\n        elem0=[149598261129.93335, 0.016711230601231957,\n               2.640492490927786e-07, 3.141592653589793, 4.938194050401601, 0],\n        elemf=[227943822376.03537, 0.09339409892101332,\n               0.032283207367640024, 0.8649771996521327, 5.000312830124232, 0],\n        mass=1000,\n        thrust=0.3,\n        isp=2500,\n        atol=1e-12,\n        rtol=1e-12,\n        tof=[100, 700],\n        freetime=True,\n        alpha=0,\n        bound=True,\n        mu=pk.SUN_MU),\n        with_grad=False)\n\n    prob = pg.problem(udp)\n    prob.c_tol = [1e-7] * 10\n\n    # 3 - Population (i.e. 
initial guess)\n pop = pg.population(prob)\n pop.push_back(z)\n\n # 4 - Solve the problem (evolve)\n pop = algo.evolve(pop)\n\n # 5 - Inspect the solution\n if prob.feasibility_x(pop.champion_x):\n print(\"Optimal Found!!\")\n # We call the fitness to set the leg\n udp.udp_inner.fitness(pop.champion_x)\n arr = udp.udp_inner.leg.get_states(1e-12, 1e-12)\n print(\"Final mass is: \", arr[-1, 7])\n else:\n print(\"No solution found, try again :)\")\n # plot trajectory\n axis = udp.udp_inner.plot_traj(\n pop.champion_x, quiver=True, mark=\"k\", length=1)\n plt.title(\"The trajectory in the heliocentric frame\")\n\n # plot control\n udp.udp_inner.plot_control(pop.champion_x)\n plt.title(\"The control profile (throttle)\")\n plt.ion()\n plt.show()\n\n # Show the trajectory data\n udp.udp_inner.pretty(pop.champion_x)\n\nif __name__ == \"__main__\":\n run_example7()\n" }, { "alpha_fraction": 0.5682032108306885, "alphanum_fraction": 0.6105362176895142, "avg_line_length": 28.52777862548828, "blob_id": "4e2646e37d3a60a6dc63002474394ee5a7d75848", "content_id": "cd0e0547a68ccefcf347516ef323c7d70dd112e3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1063, "license_type": "permissive", "max_line_length": 71, "num_lines": 36, "path": "/docker/examples/_ex2.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "def run_example2():\n import matplotlib as mpl\n from mpl_toolkits.mplot3d import Axes3D\n\n import matplotlib.pyplot as plt\n from pykep import epoch, DAY2SEC, AU, SUN_MU, lambert_problem\n from pykep.planet import jpl_lp\n from pykep.orbit_plots import plot_planet, plot_lambert\n\n mpl.rcParams['legend.fontsize'] = 10\n\n fig = plt.figure()\n axis = fig.gca(projection='3d')\n\n t1 = epoch(0)\n t2 = epoch(640)\n dt = (t2.mjd2000 - t1.mjd2000) * DAY2SEC\n\n axis.scatter([0], [0], [0], color='y')\n\n pl = jpl_lp('earth')\n plot_planet(\n pl, t0=t1, color=(0.8, 0.8, 1), legend=True, units=AU, ax=axis)\n rE, vE = pl.eph(t1)\n\n pl = jpl_lp('mars')\n plot_planet(\n pl, t0=t2, color=(0.8, 0.8, 1), legend=True, units=AU, ax=axis)\n rM, vM = pl.eph(t2)\n\n l = lambert_problem(rE, rM, dt, SUN_MU)\n plot_lambert(l, color='b', legend=True, units=AU, ax=axis)\n plot_lambert(l, sol=1, color='g', legend=True, units=AU, ax=axis)\n plot_lambert(l, sol=2, color='g', legend=True, units=AU, ax=axis)\n\n plt.show()\n" }, { "alpha_fraction": 0.6785010099411011, "alphanum_fraction": 0.6962524652481079, "avg_line_length": 21.954545974731445, "blob_id": "4f51696b3b97b54514460f1a039be663ab8d2ef4", "content_id": "cd9acdc293c0b42c4cabc007882c73833b45a4e5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 507, "license_type": "permissive", "max_line_length": 53, "num_lines": 22, "path": "/code/pyscripts/visualize_cudasim_results.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\n\nimport numpy as np\nfrom math import log\n\nnp.random.seed(1)\nscores = np.array(np.loadtxt(\"cuda_moon_scores.txt\"))\nprint(scores.shape)\n# x = range(scores.shape[0])\nNiter = 1000\nx = range(Niter)\nfor idx in np.random.randint(0, scores.shape[1], 3):\n timeline = scores.T[idx]\n logt = [log(t) for t in timeline[:Niter]]\n plt.plot(x, logt)\nax = plt.gca()\nax.set_xlabel(\"iterations\")\nax.set_ylabel(\"log(fitness)\")\nplt.show()\n\n" }, { "alpha_fraction": 0.7698820233345032, 
"alphanum_fraction": 0.7768606543540955, "avg_line_length": 62.86651611328125, "blob_id": "70beccff6561eb92c1ad6299c9c2a18f0562f3f4", "content_id": "871ec2cd358d8c60465ce96c2f12e7aad0a9fd6c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 28635, "license_type": "permissive", "max_line_length": 752, "num_lines": 442, "path": "/letomes-worklog.md", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "# Worklog\n\n## September 17th\n\n### Oisin worked on plots and computing final numbers for lyapunov aspect of discussion\n\n## August 5\n\n### Gandalf: Equation of motions for R4B\nLavede et udkast til bevægelsesligninger for R4B (Restricted 4 Body) system og sendt til Poul over mail (alle andre på cc). Måske er polære koordinater bedre.\n\n## August 2\n\n### Gandalf: Pytest på plads\n\nPakke kan nu installeres med `pip install -e .` fra package root (dvs. `letomes/code`).\nPytests kan nu køres fra root med kommando `pytest`.\nMathematica spytter sine resultater ud i json fil, og importeres i pytest filer til sammenligning.\nNu er Pdot funktionen blevet unit testet.\n\n## July 31\n\n### Gandalf: Finally reconciled most of old/new code\n\nOnly need no merge a few more changes, then there's no more \"old code\" after the refactoring fiasco. Many days spend on annoying little things like line ending characters, file permissions etc., and futile attempts on getting the new code to work, even though it wasn't tested throughout refactoring process. But now we're on the right track and almost there.\n\n### Oisin: Extracting ephemeris data for clockwork solar system model\n\nTried to extract ephemeris data for clockwork solar system model. As yet not successful. Wrote to Dario Izzo for help. Plan is to just create the skeleton for the position-polling function tomorrow, instead of banging my head against impenetrable Fortran interfaces.\n\n## July 24\n\n### Oisin\n\nAdded plotting functionality to orbsim. As a follow-up to last entry, i did create a planet class, and in general have been tinkering a lot with the simulator. We are VERY close to having the simulator working, and recreating the old orbits. Once that's done, we recreate it for the MARS case, and bob's your uncle: We're done. I'm positively salivating at the prospect.\n\n## July 17\n\n### Added rewritten simulation files to the new orbsim package that we have fashioned\n\nRemoved them from ipynb. The notebooks should probably have a much more surface-level interaction with the sims and derivations and all that. We agreed to buld a planet class to hold all the stuff about how close we can fly to the planet and all that. It's not handled nicely right now.\n\n## July 14\n\n### Python modules fixed + pandoc demos uploaded\n\nHar indført nogle `__init__.py` filer for at python koden de forskellige steder virker som moduler der kan importeres relativt til top-level. Derudover har jeg uploaded pandoc+academic, en samling a demos/eksempler på brugen af pandoc/markdown som erstatning til ren LaTeX.\n\n## July 13\n\n### Symplectic implementeret i pygmo (WIP)\n\nOisin: Jeg har implementeret simuleringsfunktionerne fra symplectic.py i pygmo problemet. Det kører ikke helt endnu, men jeg forventer at der er et eller andet oppe om ca. to arbejdsdage. Vi har diskuteret masser af nomenklatur, og prøvet at rydde op i diskrepansen mellem skreven matematik og variabelnavne i implementeringen. Den samtale er helt sikkert ikke slut før d. 31. August kl. 
## July 12\n\n### Refactored symplectic equation + Git LFS hassle\n\nStarted by rewriting the symplectic equation a bit so we don't get bitten by it again, as preparation for simplifying that part of the code. The day got a bit derailed by both of us having git problems. Our introduction of LFS for the chapter PDF files caused some obscure problems. Gandalf redid it from scratch on the command line and now it apparently works fine.\n\n## July 11\n\n### Pair-programming: understanding the symplectic function in the BSc code\n\nSpent a good part of the day understanding the symplectic function in symplectic.py, where most of the logic happens. This is relevant both for implementing ES in the BSc code and for the new pagmo-based code.\n\n#### Debugger hell\n\nWe both fiddled a bit with getting the debugger to run in VSCode. Gandalf concluded that the debugger in VSC takes 27 seconds to start on the library's computers. Will try running it off the C: drive (local) instead of the L: drive (network) tomorrow, but am not hopeful.\n\n#### Pandoc finally works on windows too\n\nHallelujah.\n\n## July 10\n\n### Moon code overview\n\nGandalf and Oisin pair programmed today. Specifically, we went through the code from a bird's-eye view, after which we focused on \"the business end\", i.e. the solvers and the search algorithm. In that process we found a sign error in the implementation of the Hamilton equations, which has introduced an error of about one percent in the BSc equations of motion. Worth sending an addendum out to all the poor scientists who use those results as a cornerstone of their own projects. There are surely millions in ERC money at stake.\n\n## July 9\n\n### MikTeX portable + pandoc bug time wasting\n\nCouldn't compile my section because MikTeX is outdated, so I just thrashed around getting a portable version to work. Ended up making a kind of alias in the powerscript profile + compiling the latest version of pandoc, because there is apparently [a bug](https://github.com/jgm/pandoc/issues/4681) in the latest release regarding the absolute path of the `--pdf-engine` argument. FML; tomorrow Oisin and I will put our heads together and make something actually worthwhile.\n\n#### Report -- Oisin\n\nI have written a page and a half about Salimans-NES and its differences from other ES types, plus some updated info on how pagmo is integrated into our solution. Regarding ES, I should probably add a bit about ES vs. entirely different algorithms, and how they behave on different problem types.\n\nIn connection with the above I have read the Salimans ES paper more closely, and feel I have deepened my understanding of the underlying optimization problem. I have always found it hard to understand concepts communicated purely through formal logic, so I have found a metaphor that makes me happy. It is written in the report.\n\n## July 6\n\n### Pagmo -- Oisin\n\nI have implemented Karpathy/Salimans ES in pagmo, so it is trivially parallelizable. Runs nicely on the local machine. Also runs on HPC, but a bit bare-bones. I would really like a way to show plots from HPC runs that doesn't involve saving a local file and fetching it via scp (-\\_-)\n\nThe next step is to throw some Earth-Moon simulation into the problem we are solving (right now it is a toy problem space: an assortment of gaussians). That will presumably involve copying Gandalf's old code into my framework. The tentative deadline for that is set at the end of next week. The core parameter update is sketched below.
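\n\nA minimal sketch of the Karpathy/Salimans-style natural-ES update being discussed (my own illustration, not the actual pagmo integration; population size, sigma and learning rate are placeholders):\n\n```python\nimport numpy as np\n\n\ndef nes_step(theta, fitness, npop=50, sigma=0.1, alpha=0.01):\n    \"\"\"One NES update: jitter theta, score the jitters, move along the\n    fitness-weighted average of the noise directions. Hyperparameters are\n    illustrative, not tuned.\"\"\"\n    noise = np.random.randn(npop, theta.size)\n    rewards = np.array([fitness(theta + sigma * n) for n in noise])\n    rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-8)\n    return theta + alpha / (npop * sigma) * noise.T @ rewards\n\n\n# toy usage: climb towards the maximum of -||x||^2 from a random start\ntheta = np.random.randn(2)\nfor _ in range(200):\n    theta = nes_step(theta, lambda x: -np.sum(x ** 2))\n```\n\n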
### Gandalf: Visual Studio Code introduction\n\nBecause the repair-shop visit for my Macbook Pro is dragging out, I have had to spend half of my time all week setting up tools on the windows machines in the library. I thought I was done, but today I realized that I don't have PyCharm available, and SublimeText can't debug properly. So I spent the day getting thoroughly acquainted with VSCode, which has hereby become my new editor of choice.\n\n## July 5\n\n### Project Roadmap\n\nOisin and I have made a detailed roadmap for the rest of the month in Asana, which can be seen here: <https://app.asana.com/0/732675643618740/timeline>\n\n### Gandalf: ES OpenAI blog + code examples worked through\n\nToday I worked through [Karpathy's natural ES example](https://github.com/karpathy/randomfun/blob/master/es.ipynb) with 2D hillclimbing (and also found a couple of [small bugs](https://github.com/karpathy/randomfun/pull/3) in it). Tomorrow I will start looking at getting it into the R3B moon code.\n\n## June 29\n\n### Meeting (OW+GS+ODK)\n\n#### The paper Izzo2018 (Machine learning and evolutionary techniques in interplanetary trajectory design)\n\nWe talked about what it is they have actually done:\n\n1. Compute some optimal trajectories with control theory\n2. Generate a training set by doing a random walk around the optimal trajectories, saving the data about the difference in delta-v etc.\n3. Use the generated training sets to train a fairly standard feed-forward neural network to learn the optimal continuous inputs $u(t)$.\n\nWe should stick to the original plan of doing ES on interplanetary transfers, and also consider running everything with our own code; do we need PyKEP for this? Is there any reason we can't store the relevant planets' motion in a kind of table, then have a simple simulator, and focus our energy on optimizing the inputs to this simple simulator?\n\n#### Our own simulator for Mars?\n\n1. Is there any reason for patched conic sections etc.? Can't we just solve the system of Newton's 2nd law numerically? Why have these 3-body systems that need to be stitched together?\n\nWe will probably move in the direction of building our own simulator now; a rough sketch of the table-driven idea follows below.
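\n\nA rough sketch of what such a table-driven simulator could look like (entirely illustrative: the constants, the made-up circular one-year \"ephemeris table\" and the explicit Euler step are stand-ins; real ephemerides and a symplectic integrator would replace them):\n\n```python\nimport numpy as np\n\nMU_SUN = 1.32712440018e11  # km^3/s^2\nMU_EARTH = 3.986004418e5  # km^3/s^2\n\n# made-up ephemeris lookup table: time (s) -> Earth position (km), circular orbit\nt_table = np.linspace(0.0, 3.156e7, 1000)\nearth_xy = 1.496e8 * np.stack(\n    [np.cos(2 * np.pi * t_table / 3.156e7), np.sin(2 * np.pi * t_table / 3.156e7)]\n)\n\n\ndef earth_position(t):\n    \"\"\"Look the planet up in the table instead of simulating it.\"\"\"\n    return np.array(\n        [np.interp(t, t_table, earth_xy[0]), np.interp(t, t_table, earth_xy[1])]\n    )\n\n\ndef step(r, v, t, h):\n    \"\"\"One explicit Euler step of Newton's 2nd law for the spacecraft only.\"\"\"\n    d = r - earth_position(t)\n    a = -MU_SUN * r / np.linalg.norm(r) ** 3 - MU_EARTH * d / np.linalg.norm(d) ** 3\n    return r + h * v, v + h * a, t + h\n```\n\n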
## June 28\n\n### Gandalf: summary of recent weeks (pandoc/LaTeX/Typora)\n\nSince I am 95% done with the thing I have spent most of my time on over the past few weeks, I'll wait until I'm completely done. However, I have just shifted focus, partly because I had to catch up with a paper before an Ole meeting, and partly because my computer has to go in for repair for a couple of days. But I will soon write a log of what has happened.\n\n### Tsiolkovsky's rocket equation\n\nFollowed, and rewrote myself, two derivations of this important equation:\n$$\n\\Delta v = v_e \\ln{\\frac{m_0}{m_f}}\n$$\nwhere $\\Delta v$ is the change in speed of the rocket, $v_e$ is the exhaust velocity in the rocket's frame, $m_0$ is the rocket's mass before the burn, and $m_f$ is the rocket's mass after the burn.\n\n[Wiki](https://en.wikipedia.org/wiki/Tsiolkovsky_rocket_equation)\n\n### Specific impulse\n\nA measure of how efficient a rocket is relative to the mass/weight of its propellant. From [wikipedia](https://en.wikipedia.org/wiki/Specific_impulse):\n\n> Specific impulse (usually abbreviated Isp) is a measure of how effectively a rocket uses propellant or jet engine uses fuel. By definition, it is the total impulse (or change in momentum) delivered per unit of propellant consumed and is dimensionally equivalent to the generated thrust divided by the propellant mass flow rate or weight flow rate.\n\nAnd this fundamental relation:\n$$\nF_\\text{thrust} = g_0 \\cdot I_\\text{sp} \\cdot \\dot m,\n$$\nwhere $F_\\text{thrust}$ is the thrust force, $g_0$ is the gravitational acceleration, $I_\\text{sp}$ is the specific impulse and $\\dot m$ is the mass flow rate.
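\n\nA quick worked example combining the two relations (round, illustrative numbers rather than anything from our mission): an engine with $I_\\text{sp} = 300$ s has exhaust velocity $v_e = g_0 I_\\text{sp} \\approx 9.81 \\cdot 300 \\approx 2943$ m/s, so a burn that uses up half the ship's mass ($m_0/m_f = 2$) yields\n$$\n\\Delta v = v_e \\ln{\\frac{m_0}{m_f}} \\approx 2943 \\cdot \\ln 2 \\approx 2.0 \\text{ km/s}.\n$$\n\n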
## June 12\n\n### pykep/pygmo\n\nOisin: Reading docs for pykep and pygmo, forked pykep to make some changes to the plotting functionality. Figured that might be useful later as well. Posted some questions for the next meeting. I really want to get to implementing our own ES algorithm, though pygmo has a very nice CMA-ES that looks like it might do what we need out of the box.\n\n### hpc\n\nOisin: Yesterday, I got HPC@DTU to run my initial experimentation code, and it took us to Mars nicely. Made a branch for HPC@DTU adventures, but I rebased that back onto master again today.\n\n## June 9\n\n### LaTeX stuff\n\nGandalf: Just read up on LaTeX packages and explored the Typora+pandoc>LaTeX workflow further.\n\n## June 8\n\n### EJP LaTeX requirements\n\nGandalf spent the day getting acquainted with the LaTeX requirements of EJP (European Journal of Physics), so they will be happy the first time we submit and hopefully won't need any formatting-related corrections.\n\n## June 7\n\n### Not much\n\nGandalf: Not a particularly productive day for me. Struggled with some Mac high-cpu process problems, and didn't get much further.\n\n## June 6\n\n### Getting the BSc code to run again\n\nGandalf: The old code runs fine. At least I have run some saved trajectories, and they run fine and produce figures. Will try a search tomorrow or Friday. Numbapro had been discontinued, but has become FOSS as numba, so no problem. Only a few things had to be changed for it to work.\n\nLooked a bit at some theory, and at things for tomorrow's meeting, now in the Asana meeting notes.\n\n## June 5\n\n### More PyKEP\n\nOisin has been looking at defining custom problems with Pagmo (solver for PyKEP), and at migrating to hpc@dtu. Docker is not available on hpc@dtu, so we'll have to set up a more custom environment. Little experiment notebook added to repo, requires PyKEP installed.\n\n### Paper LaTeX stuff\n\nJust fixed a bunch of problems in the LaTeX source code for the paper today, and got the number of errors reduced from ~50 to ~30. Along the way I discovered, though, that we have used the wrong template (my bad!), and at first glance it looks as if the correct template from European Journal of Physics is 1-column, not 2-column, which presumably means I wouldn't have needed to fix the figures. Oh well... we'll see once we have created the paper as the correct article type and I get a look at the final LaTeX template.\n\n## June 4\n\n### Project status\n\nFirst joint Thesis work session in 2 months. Oisin and Gandalf talk about the status of the project, and update Asana.\n\n### PyKEP\n\nQuite a powerful program. Outputs some nice trajectories in a 3D plot and everything! Has some predefined mission types (gravity assist + single deep space maneuver) that are almost criminally easy to work with\n\n(roughly speaking: **goto([earth, mars, earth])**).\n\nOn top of that, it can find them with countless search methods, including CMA-ES, which is tangentially related to Salimans et al.\n\nThere are options for making fully custom mission types, which Oisin will look at tomorrow. Open source.\n\n### Paper progress\n\nPaper printed out and read through -> new corrections. Gandalf has started fixing figures.\n\n## March 19\n\n### Meeting (Poul+Oisin)\n\nTalked a bit about everything and got Oisin up to speed.\n\n### PyKEP and Docker\n\nOisin and Gandalf spent the rest of the afternoon fighting with the installation of [PyKEP](https://esa.github.io/pykep/) and the Docker image.\nPyKEP seems like an option as far as using an existing library goes, so we would like to try it out a bit so we can decide whether we should build our own simulation or not. It looks promising at first glance.\n\n## March 9\n\n- Set up a repository for the old BSc project + created an Asana workspace so we can all collaborate better, especially now that Oisin has joined.\n- Fiddled with some LaTeX technicalities around colors, which I used for Poul's answers in poul-q.pdf\n\n## March 8\n\n### Meeting (Poul)\n\nWe talked about my questions from poul-q.pdf as previously sent out by mail, plus other questions. A friend from Caltech (Casey Handmer, who is now a \"levitation engineer\" at Hyperloop One) has recommended that I check out the following:\n\n- [ ] [PyKEP](https://esa.github.io/pykep/) - An open source python library from ESA for orbit simulation in the solar system, implemented in C++ with a python interface and many bells and whistles.\n- [ ] [Porkchop plots](https://en.wikipedia.org/wiki/Porkchop_plot)\n- [ ] [Lambert's problem / algorithm](https://en.wikipedia.org/wiki/Lambert%27s_problem)\n\nCasey's full message:\n\n> I only just started work on this problem and its slow progress.\n> The short answer is PyKEP.\n> The long is that traditional porkchop plots are done with Lambert's algorithm, but they tend to break if there's too much plane involved. Damon Landau at JPL has been working this problem too.\n> I'm currently traveling, but please update me next week on my gmail?\n\nI will soon upload a poul-q.pdf updated with Poul's answers.\n\n## March 1 + March 4 + March 5 + March 7, 2018\n\n### BSc read-through\n\nAll these days I worked only sparsely and somewhat unfocused, but on the same thing: reading through the BSc project, transferring material to the paper on ShareLaTeX with HH and Poul, and fixing small errors / re-understanding everything along the way.\n\n## February 28\n\nRead a fair amount about assorted orbital mechanics topics today:\n\n- Interplanetary Transport Network (ITN)\n- Sphere of influence\n- Hill sphere\n- Roche lobe\n- Lagrange points\n- Halo orbits\n- Lissajous orbits\n- Re-derived some basic expressions (escape velocity, geostationary orbit)\n\n## February 26\n\ntl;dr:\n\n- Continued the BSc read-through / paper.\n- Got to know the program Sketch, fixed a figure.\n\nGot a bit further in the read-through and hand recalculations of the BSc report, plus paper writing.\n\nFound a small error in a figure, and tried to open it in Inkscape to fix it. Couldn't get Inkscape to work within the first 30 minutes on my Mac. Spent quite some time getting to know another vector drawing program, Sketch, which I have had sitting on my Mac for a while. So now I am at least equipped to make more nice vector graphics figures manually, should it become necessary (depending on the circumstances I will of course also consider TikZ).\n\n## February 22\n\nHave started a thorough read-through of the BSc report, and am going through the most important calculations by hand again.\n\n## February 15\n\nAs something new, I will now start writing tl;dr (Too Long; Didn't Read) summaries at the top of each worklog entry, so one can get a quick overview without having to read everything in detail.\n\ntl;dr\n\n- Meeting with HH, some talk about intuition around transfer orbits to Mars, plus a little look back at the BSc project.\n- Spent a good part of the day investigating Python IDEs, because Python plots in external windows are a terrible experience. I have ended up with PyCharm Pro because of its \"Scientific View\", which is pure genius.\n\n### Meeting (HH)\n\nHH and I talked a bit about various intuitions about which transfer orbits could be interesting to look at, and I remembered that I had done some Hohmann transfer between the Earth and Mars analytically in the Ae105 course at Caltech, which I will brush up on and present at the next meeting or the one after.\n\nI will make a draft of a project description and project plan before Thursday and upload it to the repo (I will send an email out).\n\n#### Old code + Python IDE: PyCharm Pro\n\nI started looking at the old BSc code, and was again reminded of how incredibly annoying it is with figures popping up behind the editor and so on. I used SublimeText + Terminal back then, but I decided to try something else.\n\n- **Spyder:** Have used it a bit as a teaching assistant, and it has two big strengths: integrated plots and a variable explorer (and basic debugging tools). But it is quite _wonky_ as an application, and in many ways not very pleasant to use.\n- **PyCharm:** Full-stack development IDE, which I have sniffed at before but found too big / heavy to dance with (like Eclipse). To my great joy, however, I have found out about PyCharm Pro (which is available with an educational license). So that's one more thing sorted.\n\n## February 8\n\n### Meeting (Poul + HH + Ole)\n\nFirst joint meeting with all supervisors.\nEveryone agrees that the project is fun and highly topical with Tuesday's Falcon Heavy launch and the Tesla Roadster in orbit.\n\n- High-level discussion of the project.\n- Gandalf will prepare a draft of an outline of a project plan, to present to HH next week.\n- Poul will investigate more about how trajectories to Mars are traditionally made.\n- Ole suggests that we stick with the moon, and perhaps first test an ES algorithm on it before we tackle Mars. Everyone agrees to try things out on the moon first, and that the interesting goal is still getting to Mars.\n- It is discussed how the asteroid belt should be handled in the model. Ole did a quick Google search, which suggests that the asteroid belt is very sparsely 'populated' with asteroids, and that one can therefore probably disregard it. Gandalf and Poul will look for final confirmation. Alternatively, one can always make an orbit slightly out of the plane of the solar system.\n- Gandalf and Poul will finish the article, which is basically the BSc project boiled down into a short article, and thereby also get the BSc project revised in the same stroke.\n- Ole suggests that I Google \"variational optimization\" as yet another possible search method.\n\n## February 7\n\n### Tower / Git studies done\n\nI learned a couple of useful things along the way:\n\n#### Git LFS (Large File Storage)\n\nIf you have very large binary files in your repo that change a little all the time, it can produce a very large repository size very quickly, because git stores a complete copy of all versions of the binary files.\nThis can be solved, at least partially, with Git LFS. In short, it means dropping the requirement of storing all versions of a given file / folder / file extension / filename pattern _locally_, keeping locally only the files needed in the current revision. All versions of large files marked for LFS still live on the remote server (in the LFS Store), but locally all versions of the files exist only as _pointers_ to the remote LFS Store, and only files needed in the currently checked out version are downloaded locally. That is, it solves the problem locally, though all versions of large files will still live on the remote server. Good to know.\nGood explanations:\n\n- <https://www.git-tower.com/learn/git/ebook/en/command-line/advanced-topics/git-lfs#chapter_installing+git+lfs>\n- <https://www.atlassian.com/git/tutorials/git-lfs>\n- <https://www.youtube.com/watch?time_continue=16&v=9gaTargV5BY>\n\n#### Git Submodules\n\nOften you want to include external libraries and other resources. You can of course download these and copy them into your own project. There are two problems with this:\n\n1. You mix external code with your own unique code / project files, whereas it is cleaner to keep these things separated, especially if you have to share the code with the rest of the world later.\n2. If the external library gets updated (with e.g. bugfixes or new features), it is quite cumbersome to update that library's code in your own repo; again we have to fetch the raw files and overwrite the old files.\n\nSubmodules make it possible to have a \"git repo inside a git repo\", such that the inner repo (called a submodule) is not tracked by the parent repo; they are kept as two separate git repos even though one lives inside the other. It is still easy to keep the inner submodules updated, though.\nAn important difference between normal git repos and submodule repos is that submodules always point to a specific commit, rather than a specific branch. This is because submodules are often used for external libraries that you don't want to change too often, unless you do it manually.\n\nGood explanation at:\n\n- <https://www.git-tower.com/learn/git/ebook/en/desktop-gui/advanced-topics/submodules>\n\n#### .bib file citekeys (possibly via Alfred workflow)\n\nI have searched the web in vain for an Alfred workflow that can do autocompletion and insertion of BibTeX citekeys from a .bib file into a text field with formatting such as [@Newton], as used in pandoc / pandoc-citeproc. The reason I could use this is that Typora (my primary markdown editor) unfortunately does not support autocompletion / suggestion of citekeys from a .bib file, the way many dedicated LaTeX editors and LaTeX packages for popular text editors (e.g. LaTeXTools for SublimeText) do. That means I have to sit and write all citations 100% manually without any help, which is a bit tedious, but something I will have to live with if need be, because I like the Typora/Markdown/pandoc workflow better than the pure LaTeX workflow.\n\nAfter searching Google, I have created an [issue](https://github.com/andrewning/alfred-workflows-scientific/issues/9) on this person's Github repo for an Alfred workflow, and created [this](https://www.alfredforum.com/topic/11223-autocompletion-references-from-bib-file/) forum post on alfredforum.com. There is nothing more I can do for now.\n\nPS: I did not work on the thesis this past Monday (February 5), as I needed to work on AI instead that day.\n\n## February 4\n\n### Tower (git client) familiarization\n\nToday was pure study of Tower, where I made myself familiar with the program and followed their [video learning material](https://www.git-tower.com/learn/git/videos), which I am about halfway through.\n\n## February 1\n\n### Git client decided: Tower\n\nI have concluded that I could use a Git client with a few more features, and have ended up with [Tower](https://www.git-tower.com/). I was then supposed to dig into it, but spent quite some time reading their [blog](https://www.git-tower.com/blog/home) instead. They have many good articles and tips. I will not spend more working time on this blog, though, but must get properly acquainted with the program when the opportunity arises.\n\n### Hazel script for automating .bib file copying from Mendeley\n\nNext, I finished my Mendeley workflow by setting up two [Hazel](https://www.noodlesoft.com) scripts which:\n\n1. Automatically move all (PDF) files from my `Mendeley Watched Folder` into a backup folder after they have been there for 1 minute (Mendeley imports files from the watched folder into the library, but leaves them in place, which can get a bit messy).\n2. Automatically copy the .bib file belonging to the thesis project into the thesis folder, so that I always have an up-to-date copy of the .bib file in the thesis Github repo, so everything is nicely self contained.\n\nOf my tool-related preparations I now still need to:\n\n1. Finish getting acquainted with Tower.\n2. Try an [Alfred](https://www.alfredapp.com) workflow where I can insert citations directly from Alfred.\n3. Try an Alfred workflow where I can run a terminal command directly on the open folder in Finder.\n4. Set up my Typora/pandoc workflow fully for the write-up. Then I write the thesis in Markdown, and can easily convert to .tex, .pdf, .html, and even .doc (_shudder_), among others. I used this workflow in my LearningTech report last semester, and found it excellent.\n\nAfter I have done this, I will tackle in parallel:\n\n1. The read-through of the BSc project + finishing the article based on it\n2. Thorough reading of the article [Evolution Strategies as a Scalable Alternative to Reinforcement Learning](https://arxiv.org/abs/1703.03864)\n\n## January 31\n\n### Reference manager decision: Mendeley\n\nAfter a fair amount of research and trial and error I ended up with... Mendeley. The option to \"Create one BibTeX file per group\" in Mendeley is decisive for me, since it means you can sync the .bib for each individual project, e.g. Thesis, with only the relevant references, in one file in the folder with the rest of the report, i.e. it becomes 100% self contained even when using BibTeX and Mendeley.\n\nWill probably try to give the new Readcube Papers app a chance when the beta arrives, but I doubt it will have BibTeX support as good as Mendeley's, so I don't give it much of a chance. Anyway, that puts a lid on that.\n\n### Text editors\n\nI investigated whether [Scrivener](https://www.literatureandlatte.com/scrivener/overview) or [Ulysses](https://ulyssesapp.com) had anything to offer. Although they were interesting each in their own way (especially Ulysses, which I will use for other things), I will stick to the [Typora](https://typora.io)/[pandoc](http://pandoc.org)/latex workflow for the thesis, possibly with [DEVONthink](http://www.devontechnologies.com/products/devonthink/devonthink-pro-office.html) for searching and discovering relations between text documents, although that may be overkill.\n\nThat leaves me only to set up a markdown/makefile workflow, and then I am ready to write. I can always find a LaTeX template later. I'll just do that whenever it suits me.\n\n## January 29\n\n### Meeting (Poul + HH)\n\nMeet and greet. A bit of talk about the BSc project, about what the goal is, and what evolution strategies fundamentally are.\n\nBefore the thesis is officially created as a course, two things need to be in place:\n\n1. Gandalf investigates what the rules are regarding extension of the MSc project when taking courses at the same time.\n2. Title of the project. Our current working title is \"Low Energy Transfer Orbits to Mars using Evolution Strategies\" - the question is just whether this is general enough, since we will probably also end up trying methods other than just ES. So we can all think a bit about that.\n\nWe propose an intro meeting where everyone is present next week, maybe Thursday 8/2.\n\n### Starting research on reference managers\n\nHave used Mendeley before, but am not entirely satisfied.\nLong story short:\n\n1. Hoping for / putting my trust in the upcoming joint [Papers/Readcube app](http://blog.readcube.com/post/165237712272/glimpse-into-the-new-readcube-papers-app), which could arrive [any moment](http://blog.readcube.com/post/168755698872/great-things-are-coming) now.\n2. I will probably start using Papers soon, and then migrate when the beta of the new app mentioned in point 1 comes out.\n3. I may have to fall back on Mendeley if said beta is too buggy and/or has too poor BibTeX support. Fingers crossed.\n\n## Glossary\n\nES = Evolution Strategies\n" }, { "alpha_fraction": 0.5435668230056763, "alphanum_fraction": 0.5569720268249512, "avg_line_length": 32.372806549072266, "blob_id": "9d215825e422493c6b3bde8f573e3a5f20f26ad0", "content_id": "66e4390fb4b7574de7ac4dbed33f6e8c9d83e866", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7609, "license_type": "permissive", "max_line_length": 114, "num_lines": 228, "path": "/code/orbsim/r3b_2d/integrators.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "import time\nfrom math import sqrt\n\nfrom numba import njit, jit, float64, boolean\n\nfrom . 
import *\n\n# from ..planets import celestials\nfrom .analyticals import get_pdot_x, get_pdot_y, get_xdot, get_ydot\n\n\n@njit\ndef euler_step_symplectic(h, x, y, p_x, p_y):\n \"\"\"Takes a single time step of the symplectic Euler algorithm\"\"\"\n # Step 1\n x = (x + h * (p_y * h + p_x + y)) / (1.0 + h ** 2)\n # Step 2\n # y = (y - (x - p_y + p_x * h) * h) / (1.0 + h ** 2) # Also works but more complex\n y = y + (p_y - x) * h\n # Step 3\n p_x = p_x + get_pdot_x(x, y, p_y) * h\n p_y = p_y + get_pdot_y(x, y, p_x) * h\n\n return x, y, p_x, p_y\n\n\n@njit\ndef verlet_step_symplectic(h, x, y, p_x, p_y):\n \"\"\"Takes a half step, then another half step in the symplectic Verlet algorithm\"\"\"\n # Step 1 - q_{i+1/2}\n x = (h ** 2 * p_y + 2 * h * (p_x + y) + 4 * x) / (4 + h ** 2)\n y = y + h / 2 * (p_y - x)\n\n # Step 2 - p_{i+1}\n pdot_x = get_pdot_x(x, y, p_y) # old timestep\n pdot_y = get_pdot_y(x, y, p_x) # old timestep\n p_x = (h ** 2 * (2 * pdot_y + p_x) + 4 * h * pdot_x + 4 * p_x) / (4 + h ** 2)\n p_y = p_y + h / 2 * (pdot_y + get_pdot_y(x, y, p_x))\n\n # Step 3 - q_{i+1}\n xdot = get_xdot(y, p_x)\n ydot = get_ydot(x, p_y)\n x += h / 2 * xdot\n y += h / 2 * ydot\n\n return x, y, p_x, p_y\n\n\n@njit\ndef relative_error(vec1, vec2):\n x1, y1 = vec1\n x2, y2 = vec2\n return sqrt(((x2 - x1) ** 2 + (y2 - y1) ** 2) / (x2 ** 2 + y2 ** 2))\n\n\n@njit\ndef symplectic(\n x0, y0, p0_x, p0_y, score, success, duration=3 / UNIT_TIME, max_iter=1e7\n):\n \"\"\"\n runs symplectic adaptive euler-verlet algorithm\n All values are with nondimensionalized units\n \"\"\"\n success[0] = 0\n h = h_DEFAULT\n h_min = h_MIN_DEFAULT\n t = 0 # total elapsed time\n x, y, p_x, p_y = [x0, y0, p0_x, p0_y]\n\n tol = STEP_ERROR_TOLERANCE # * (1e7 / max_iter)\n # print(tol)\n\n path_storage = []\n path_storage.append([x, y, p_x, p_y, h, t])\n smallest_distance = 1e6\n Dv = None\n iteration_count = 0\n # print(orbital_radius_upper_bound)\n # earth = Planet(celestials.EARTH)\n target_orbital_radius = LLO_RADIUS\n target_orbital_velocity = LLO_VELOCITY\n target_position_x = LUNAR_POSITION_X\n target_position_y = 0\n target_celestial_radius = LUNAR_RADIUS\n target_celestial_mass = LUNAR_MASS\n target_altitude = LUNAR_ALTITUDE\n target_orbital_radius_nondim = target_orbital_radius / UNIT_LENGTH\n target_orbital_velocity_nondim = target_orbital_velocity / UNIT_VELOCITY\n\n earth_orbital_radius = LEO_RADIUS\n earth_orbital_velocity = LEO_VELOCITY\n earth_position_x = EARTH_POSITION_X\n earth_position_y = 0\n earth_celestial_radius = EARTH_RADIUS\n earth_celestial_mass = EARTH_MASS\n earth_altitude = EARTH_ALTITUDE\n earth_orbital_radius_nondim = LEO_RADIUS_NONDIM\n earth_orbital_velocity_nondim = LEO_VELOCITY_NONDIM\n\n orbital_radius_lower_bound = (\n target_orbital_radius - ORBITAL_TOLERANCE\n ) / UNIT_LENGTH\n orbital_radius_upper_bound = (\n target_orbital_radius + ORBITAL_TOLERANCE\n ) / UNIT_LENGTH\n while t < duration:\n if iteration_count > max_iter:\n # print(\"exceeded max iterations, stranded in space!\")\n score[0] = smallest_distance\n return path_storage\n\n x_euler, y_euler, p_euler_x, p_euler_y = euler_step_symplectic(\n h, x, y, p_x, p_y\n )\n x_verlet, y_verlet, p_verlet_x, p_verlet_y = verlet_step_symplectic(\n h, x, y, p_x, p_y\n )\n err = relative_error([x_euler, y_euler], [x_verlet, y_verlet])\n\n if err < tol or h <= h_min:\n iteration_count += 1\n x = x_verlet\n y = y_verlet\n p_x = p_verlet_x\n p_y = p_verlet_y\n\n t += h\n h = max(h_min, h * max(0.1, 0.8 * sqrt(tol / err)))\n # TODO: explain this with 
/HH's new comments. 0.8 is chosen empirically\n # old explanation below:\n \"\"\"Accept the step only if the weighted error is no more than the\n tolerance tol. Estimate an h that will yield an error of tol on\n the next step and use 0.8 of this value to avoid failures.\"\"\"\n\n else:\n # print(f\"deny step {h},{err}\")\n h = max(h_min, h / 2)\n continue\n\n \"\"\"Are we nearly there yet? (calculate distance)\"\"\"\n target_distance_x = x - target_position_x\n target_distance_y = y - target_position_y\n target_distance = sqrt(target_distance_x ** 2 + target_distance_y ** 2)\n if target_distance > 2.3:\n # print(\"we are way too far away, stranded in space!\")\n score[0] = smallest_distance\n return path_storage\n smallest_distance = min(smallest_distance, target_distance)\n\n \"\"\"For real though, are we there yet? (did we actually hit?)\"\"\"\n if (\n smallest_distance >= orbital_radius_lower_bound\n and smallest_distance <= orbital_radius_upper_bound\n ):\n \"\"\" SUCCESS! We are in orbit range\"\"\"\n # current velocity vector\n v_x = p_x + y\n v_y = p_y - x\n\n \"\"\"\n We adjust our velocity so the spacecraft enters a closed circular orbit.\n We treat target_distance as a vector from spacecraft to target\n \"\"\"\n\n # project velocity vector onto radial direction unit-vector. This is what we\n # want to subtract from the velocity vector to obtain the tangential component (closed circular orbit)\n v_radial = (\n v_x * target_distance_x + v_y * target_distance_y\n ) / target_distance\n\n # phi is the angle of the radial vector\n cos_phi = target_distance_x / target_distance\n sin_phi = target_distance_y / target_distance\n # project radial velocity component to x and y axes.\n v_x = v_x - v_radial * cos_phi\n v_y = v_y - v_radial * sin_phi\n v_magnitude = sqrt(v_x ** 2 + v_y ** 2)\n\n # Delta-V for the maneuver\n Dv = sqrt(\n v_radial ** 2 + (v_magnitude - target_orbital_velocity_nondim) ** 2\n )\n path_storage.append([x, y, p_x, p_y, h, t])\n success[0] = 1\n score[0] = Dv\n return path_storage\n\n path_storage.append([x, y, p_x, p_y, h, t])\n\n \"\"\"check if we somehow accidentally struck the earth (whoops)\"\"\"\n\n earth_distance = sqrt((x - earth_position_x) ** 2 + (y - earth_position_y) ** 2)\n\n # not necessarily a crash, but we don't want paths that take us to such risky territories\n critical_distance = (earth_celestial_radius / UNIT_LENGTH) ** 2\n if earth_distance < critical_distance:\n # print(\"Anga crashed into the earth!\")\n score[0] = smallest_distance\n return path_storage\n\n # import io\n # with open(\"tests/testsim.log\", \"w\") as file:\n # file.writelines(str(path_storage))\n # print(\"smallest distance =\", smallest_distance)\n score[0] = smallest_distance\n return path_storage\n\n\n# region Unused integrators\n\n\n# @jit\n# def unused_explicit_euler_step(h, x, y, p_x, p_y):\n# # Step 1 - get all time derivatives\n# v_x = get_v_x(y, p_x)\n# v_y = get_v_y(x, p_y)\n# pdot_x = get_pdot_x(x, y, p_y)\n# pdot_y = get_pdot_y(x, y, p_x)\n# # Step 2 - linear extrapolation\n# x = x + v_x * h\n# y = y + v_y * h\n# p_x = p_x + pdot_x * h\n# p_y = p_y + pdot_y * h\n\n# return x, y, p_x, p_y\n\n\n# endregion\n" }, { "alpha_fraction": 0.4718416929244995, "alphanum_fraction": 0.5058345794677734, "avg_line_length": 22.878787994384766, "blob_id": "7b736e3fbac6597768e3b71934f4f451b29b9c50", "content_id": "aab2093930cc2ed64ed42a5f5f07b60e492dc054", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3942, 
"license_type": "permissive", "max_line_length": 76, "num_lines": 165, "path": "/code/pyscripts/pygmo_ES.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[1]:\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom IPython import display\nimport numpy as np\n\nimport pygmo as pg\nfrom pygmo import algorithm\nimport os\nimport sys\nimport json\nfrom numba import jit, float64, int32, int64\n\nfrom time import time\nimport numpy as np\nfrom random import shuffle\n\n\n# In[2]:\n\n\nimport inspect\nimport math\n\n\n# In[3]:\n\n\n# generate a toy 2D regression dataset\nsz = 100\nX, Y = np.meshgrid(np.linspace(-1, 1, sz), np.linspace(-1, 1, sz))\nmux, muy, sigma = 0.3, -0.3, 4\nG1 = np.exp(-((X - mux) ** 2 + (Y - muy) ** 2) / 2.0 * sigma ** 2)\nmux, muy, sigma = -0.3, 0.3, 2\nG2 = np.exp(-((X - mux) ** 2 + (Y - muy) ** 2) / 2.0 * sigma ** 2)\nmux, muy, sigma = 0.6, 0.6, 2\nG3 = np.exp(-((X - mux) ** 2 + (Y - muy) ** 2) / 2.0 * sigma ** 2)\nmux, muy, sigma = -0.4, -0.2, 3\nG4 = np.exp(-((X - mux) ** 2 + (Y - muy) ** 2) / 2.0 * sigma ** 2)\nG = G1 + G2 - G3 - G4\n# fig,ax = plt.subplots()\n# im = ax.imshow(G, vmin=-1, vmax=1, cmap='jet')\n# plt.axis('off')\n\n\n# In[4]:\n\n\nclass saddle_space:\n def __init__(self):\n pass\n\n @jit(nogil=True)\n def fitness(self, x):\n return [G[int(x[0]), int(x[1])]]\n\n @jit\n def get_bounds(self):\n return ([0, 0], [99, 99])\n\n def get_name(self):\n return f\"saddlespace\"\n\n def plot(self, w, idx):\n pass\n\n\n# In[5]:\n\n\nclass salimans_nes:\n def __init__(self, iter=12):\n super(salimans_nes, self).__init__()\n self.prevx, self.prevy = [], []\n\n self.iter = iter\n\n def evolve(self, pop):\n if len(pop) == 0:\n return pop\n sigma = 3\n alpha = 0.03 # learningrate\n\n # plotting\n plotting = False\n if plotting:\n plt.figure(figsize=(self.iter, self.iter))\n no_rows = int(self.iter / 4 + 1)\n gs = gridspec.GridSpec(no_rows, 4)\n plot_index = 0\n\n # for each iteration, jitter around starting points, and move in the\n # best direction (weighted average jitter coordinates according to\n # fitness score)\n for i in range(self.iter):\n\n if plotting:\n ax1 = plt.subplot(gs[int(i / 4), plot_index])\n plot_index += 1\n if plot_index == 4:\n plot_index = 0\n plt.imshow(G, vmin=-1, vmax=1, cmap=\"jet\")\n\n # get the population\n wl = pop.get_x()\n\n # do the jittering and selection\n j = 0\n for w in wl:\n noise = np.random.randn(200, 2)\n wp = [\n [min(99, max(0, x)), min(99, max(0, y))]\n for [x, y] in np.expand_dims(w, 0) + sigma * noise\n ]\n\n if plotting:\n x, y = zip(*wp)\n plt.scatter(x, y, 4, \"k\", edgecolors=\"face\")\n R = np.array([prob.fitness(wi)[0] for wi in wp])\n R -= R.mean()\n R /= R.std()\n g = np.dot(R, noise)\n u = alpha * g\n w += u # mutate the population\n w = [min(99, max(0, w[0])), min(99, max(0, w[1]))] # bounds\n pop.set_x(j, w) # make the move previously selected\n j += 1\n return pop\n\n def get_name(self):\n return f\"Oisin's big-dick omegafantastic algorithm\"\n\n\n# In[8]:\n\n\ndef pygmo_es():\n uda = salimans_nes(iter=3000)\n udp = saddle_space()\n prob = pg.problem(udp)\n\n archi = pg.archipelago(algo=uda, prob=udp, n=1000, pop_size=300)\n archi.evolve()\n sols = archi.get_champions_f()\n idx = sols.index(min(sols))\n print(\"Done!! 
Solutions found are: \")\n print(archi.get_champions_f())\n # udp.plot(archi.get_champions_x(),idx)\n\n # pop = pg.population(prob,10,3)\n # algo.evolve(pop)\n\n\n# In[ ]:\n\n\nif __name__ == \"__main__\":\n start = time()\n print(start)\n pygmo_es()\n print(time() - start)\n\n" }, { "alpha_fraction": 0.7667638659477234, "alphanum_fraction": 0.7725947499275208, "avg_line_length": 23.5, "blob_id": "e38340fcea59e91d8190e58e03e863d71d397f02", "content_id": "5fd43749759caa9666c4064513b138ea0e6f372b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 343, "license_type": "permissive", "max_line_length": 59, "num_lines": 14, "path": "/code/multi-run-all-main_bsc.sh", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "### Runs all demos and seaches ###\n\n# Precalculated initial conditions for specific orbit types\npython main_bsc.py leo\npython main_bsc.py llo\npython main_bsc.py h\npython main_bsc.py h3\npython main_bsc.py h1\npython main_bsc.py hr\npython main_bsc.py ls\npython main_bsc.py ll\n# Search for trajectories\npython main_bsc.py sh\npython main_bsc.py sl\n" }, { "alpha_fraction": 0.4251610338687897, "alphanum_fraction": 0.4818112850189209, "avg_line_length": 29.674419403076172, "blob_id": "a1b2179f773b5035d96aeb999c9ed3e83ec78db4", "content_id": "a374bcd77337cc6c008b8dde521255904a3f7be5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5278, "license_type": "permissive", "max_line_length": 132, "num_lines": 172, "path": "/code/pyscripts/lyapunov.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[5]:\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom orbsim.r3b_2d.analyticals import *\nfrom orbsim.r3b_2d.simulators import run_sim\nfrom orbsim.plotting import orbitplot2d, orbitplot_non_inertial, multi_plot\nfrom orbsim import *\nfrom orbsim.r3b_2d import *\n\n\n# In[25]:\n\n\nN = 4\nexamples = [\n # [\"hohmann\", [-2.086814820119193, -0.000122173047640, 3.111181716545691], 5],\n [\"long_leto\", [3.794182930145708, 0.023901745288554, 3.090702702702703], 200],\n # [\"short_leto\", [-0.138042744751570, -0.144259374836607, 3.127288444444444], 41],\n # [\"3-day_hohmann\", [-2.272183066647597, -0.075821466029764, 3.135519748743719], 3],\n # [\"1-day_hohmann\", [-2.277654673852600, 0.047996554429844, 3.810000000000000], 1],\n] # [title, psi, duration]\nfor title, psi, duration in examples:\n psis = []\n paths = []\n for i in range(N):\n permute_psi = np.array(psi) + np.array([i * 1e-6, i * 1e-6, i * 1e-6])\n path = run_sim(permute_psi, max_iter=1e7, duration=duration)\n psis.append(permute_psi)\n paths.append(path)\n\n # In[26]:\n\n for i in range(len(paths)):\n orbitplot2d(\n paths[i],\n psis[i],\n filepath=\"./lyapunov_figs/trajectories\",\n title=f\"{title}_{i}\",\n )\n orbitplot_non_inertial(\n paths[i],\n psis[i],\n filepath=\"./lyapunov_figs/trajectories\",\n title=f\"{title}_nonin_{i}\",\n )\n\n # In[ ]:\n\n lyaps = []\n for a in range(N):\n for b in range(N):\n print(f\"comparing {a} and {b}\")\n if a >= b:\n continue\n multi_plot(\n [paths[a], paths[b]],\n [psis[a], psis[b]],\n orbitplot2d,\n filepath=\"./lyapunov_figs/trajectories\",\n title=f\"{title}_multi_{a}_and_{b}\",\n )\n multi_plot(\n [paths[a], paths[b]],\n [psis[a], psis[b]],\n orbitplot_non_inertial,\n filepath=\"./lyapunov_figs/trajectories\",\n title=f\"{title}_nonin_multi_{a}_and_{b}\",\n )\n plt.close('all')\n 
lyap = []\n            _a = np.array(paths[a][1]).T\n            _b = np.array(paths[b][1]).T\n            xas = _a[0]\n            yas = _a[1]\n            xbs = _b[0]\n            ybs = _b[1]\n            # hs=(_a[4],_b[4])\n            ts = (_a[5], _b[5])\n            print(\n                f\"length of the trajectory coordinate arrays: {a}: {len(xas)}, {b}: {len(xbs)}\"\n            )\n\n            _, min_ts = min([(len(x), list(x)) for x in list(ts)])\n            print(\n                f\"time steps standardized: comparing at {len(min_ts)} points on the trajectory. Last point will be at {max(min_ts)}\"\n            )\n            for idx in range(len(min_ts)):\n                # idx=min_ts[i]\n                lyap.append(\n                    sqrt((xas[idx] - xbs[idx]) ** 2 + (yas[idx] - ybs[idx]) ** 2)\n                )\n            lyaps.append(lyap)\n    print(len(lyaps))\n\n    # In[ ]:\n\n    loglyaps = []\n    for i in range(len(lyaps)):\n        lyap = lyaps[i][1:]\n        loglyap = [np.log(x) for x in lyap]\n        loglyaps.append(loglyap)\n\n    # In[ ]:\n\n    def find_segments(lyap):\n        segments = []\n        prev_l = -1e8\n        rising = True\n        segment = [0]\n        for i, l in enumerate(lyap):\n            if rising:\n                if l < prev_l:\n                    rising = False\n                    segment.append(i)\n            else:\n                if l > prev_l:\n                    rising = True\n                    segments.append(segment)\n                    segment = [i]\n            prev_l = l\n        if len(segment) == 1:\n            segment.append(len(lyap))\n            segments.append(segment)\n        return segments\n\n    # In[ ]:\n\n    from scipy import stats\n\n    def compute_slope(lyap, filepath=\".\", title=\"derp\"):\n        segments = find_segments(lyap)\n        plt.plot(range(len(lyap)), lyap, color=\"grey\", alpha=0.5)\n        slopes = []\n        for lb, ub in [[int(x), int(y)] for [x, y] in segments]:\n            if ub - lb < 100:\n                continue\n            if ub > len(lyap):\n                break\n            slope, intercept, r_value, p_value, std_err = stats.linregress(\n                range(lb, ub), lyap[lb:ub]\n            )\n            slopes.append(slope)\n            # print(slope, intercept)\n            line = slope * np.arange(lb, ub) + intercept\n            plt.plot([lb, ub], [line[0], line[-1]], color=\"darkred\")\n            plt.plot(range(lb, ub), lyap[lb:ub], color=\"teal\")\n        mean_slope = np.mean(slopes)\n        plt.suptitle(f\"mean slope = {mean_slope}\")\n        plt.savefig(f\"{filepath}/{title}.pdf\")\n        plt.clf()\n        return mean_slope\n\n    # In[ ]:\n\n    slopes = []\n    for i, loglyap in enumerate(loglyaps):\n        slopes.append(\n            compute_slope(\n                loglyap, filepath=\"lyapunov_figs/slopes\", title=f\"{title}_{i}\"\n            )\n        )\n    print(f\"mean slope = {np.mean(slopes)}\")\n\n    for i, lyap in enumerate(lyaps):\n        print(title)\n        print(\n            f\"max_dist={max(lyap)}\\nmin_dist={min(lyap[1:])}\\nmean_dist={np.mean(lyap)}\\n\"\n        )\n\n" }, { "alpha_fraction": 0.5049260854721069, "alphanum_fraction": 0.802955687046051, "avg_line_length": 28, "blob_id": "f575c46259acbc0107ec5c7048f22cccdfbcf22e", "content_id": "3cfa07e52a99112bbc7784ba49467864b9732f16", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1218, "license_type": "permissive", "max_line_length": 47, "num_lines": 42, "path": "/code/marscudasim/constants.h", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "#pragma once\n\n#define EARTH_ALTITUDE 160.0\n#define LUNAR_ALTITUDE 100.0\n#define ORBITAL_TOLERANCE 10\n#define h_DEFAULT 1e-06\n#define h_MIN 1e-10\n#define STEP_ERROR_TOLERANCE 1e-14\n#define UNIT_LENGTH 149597887.14676577\n#define UNIT_TIME 31558149.10224\n#define UNIT_VELOCITY 4.7403885\n#define UNIT_VELOCITY2 5.4865607661342276e-05\n#define LEO_RADIUS 6538.1\n#define LEO_VELOCITY 7.807950391158399\n#define LLO_RADIUS 1837.1\n#define LLO_VELOCITY 1.633820038353055\n#define SUN_R 0\n#define SUN_THETA 45\n#define SUN_PHI 0\n#define LEO_RADIUS_NONDIM 4.370449425923828e-05\n#define LEO_VELOCITY_NONDIM 1.647111917337239\n#define SUN_ETA 39.47748480942167\n#define EARTH_ETA 
0.00011856931682974265\n#define MARS_ETA 1.2739881192904076e-05\n\n#define DAY 86400.0\n#define a_EARTH 149597887.14676577\n#define T_EARTH 31558149.10224\n#define SUN_MASS 1.988435e+30\n#define EARTH_RADIUS 6378.1\n#define EARTH_MASS 5.9721986e+24\n#define LUNAR_RADIUS 1737.1\n#define LUNAR_MASS 7.34767309e+22\n#define EARTH_MOON_DISTANCE 384400.0\n#define LUNAR_ORBIT_DURATION 27.322\n#define MARS_RADIUS 3396.2\n#define MARS_MASS 6.41693e+23\n#define SUN_MU 132709742648.00002\n#define EARTH_MU 398589.31232288\n#define MARS_MU 42827.1041744\n\n#define SUN_RADIUS 695508\n" }, { "alpha_fraction": 0.5548906326293945, "alphanum_fraction": 0.5739232897758484, "avg_line_length": 29.88787841796875, "blob_id": "b94c4700954e102fed3709c43197ee7fb8b40768", "content_id": "e6ff121c9827e7ff5cabc186387b649ddfa6fc4f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10193, "license_type": "permissive", "max_line_length": 93, "num_lines": 330, "path": "/code/orbsim/r4b_3d/simulation.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nRepeatedly run single integration steps for some initial conditions until some stopping\nconditions.\n\n1. `analyticals.py`: set up the equations of motion.\n\n2. `integrators.py`: discretize the equations of motion and defines a single time step of the\n chosen numerical algorithm.\n\n3. `simulation.py`: run the single steps from `integrators.py` repeatedly for some initial\n conditions and stopping conditions.\n\"\"\"\n\nimport logging\nimport time\nfrom decimal import Decimal\n\nimport numpy as np\n\nfrom orbsim import EARTH_RADIUS\nfrom orbsim.r4b_3d import UNIT_LENGTH, UNIT_TIME, UNIT_VELOCITY\nfrom orbsim.r4b_3d.coordinate_system import (\n get_distance_spherical,\n get_position_cartesian_from_spherical,\n get_speed_cartesian,\n get_speed_spherical,\n get_velocity_cartesian_from_spherical,\n get_velocity_spherical_from_cartesian,\n)\nfrom orbsim.r4b_3d.ephemerides import (\n get_coordinates_on_day_rad,\n get_ephemerides,\n get_ephemerides_on_day,\n)\nfrom orbsim.r4b_3d.equations_of_motion import (\n get_B_phi,\n get_B_R,\n get_B_theta,\n get_phidot,\n get_Rdot,\n get_thetadot,\n)\nfrom orbsim.r4b_3d.integrators import euler_step_symplectic, verlet_step_symplectic\n\n\ndef simulate(\n psi,\n max_year=\"2039\",\n h=1 / UNIT_TIME,\n max_duration=1 * 3600 * 24 / UNIT_TIME,\n max_iter=int(1e6),\n):\n \"\"\"Simple simulator that will run a LEO until duration or max_iter is reached.\n\n Keyword Arguments:\n psi {tuple} -- Initial conditions: (day, Q0, B0, burn)\n max_year {string} -- Max year for ephemerides table (default: \"2020\")\n h {float} -- Initial time step size (default: 1/UNIT_LENGTH = 1 second in years)\n max_duration {int} -- Max duration of simulation (in years) (default: {1 day})\n max_iter {int} -- Max number of iterations of simulation (default: {1e6})\n (1e6 iterations corresponds to ~11 days with h = 1 s)\n\n Returns:\n [type] -- [description]\n \"\"\"\n logging.info(\"STARTING: Simple simulation.\")\n t_start = time.time()\n max_iter = int(max_iter)\n\n # Unpack psi\n day = psi[0]\n Q = psi[1]\n B = psi[2]\n delta_v0 = psi[3]\n\n t = day * 3600 * 24 / UNIT_TIME\n t0 = t\n\n # Read ephemerides\n logging.debug(\"Getting ephemerides tables\")\n ephemerides = get_ephemerides(max_year=max_year)\n\n # Apply initial burn if delta_v input is provided\n if delta_v0:\n B = apply_delta_v(Q, B, delta_v0)\n\n # Unpack initial position (Q) and momenta (B)\n R, theta, phi = Q\n 
B_R, B_theta, B_phi = B\n\n    logging.info(f\"Initial coordinates: Q = {Q} (R, theta, phi)\")\n    logging.info(f\"Initial momenta: B = {B} (B_R, B_theta, B_phi)\")\n\n    logging.info(\n        f\"Starting simulation at time {t} ({day} days) with step size h = {h} \"\n        f\"({h*UNIT_TIME} s)\"\n        f\", max {max_iter} iterations and max {max_duration*UNIT_TIME/3600/24} days\"\n    )\n\n    # List initialization\n    i = 0\n\n    ts = []\n    days = []\n    Qs = []\n    Bs = []\n    q_p_list = []\n    eph_body_coords = []\n    body_distances = []\n\n    # Run iteration 0 manually\n    ts.append(t)\n    days.append(day)\n    Qs.append([R, theta, phi])\n    Bs.append([B_R, B_theta, B_phi])\n    q_p_list.append((Qs[0], Bs[0]))\n    t1 = time.time()\n    sim_time = t1 - t_start\n    logging.info(\n        f\"Iteration {str(i).rjust(len(str(max_iter)))} / {max_iter}\"\n        f\", in-sim time {format_time(t, time_unit='years')} / \"\n        f\"{format_time(max_duration, time_unit='years')}\"\n        f\" (out-of-sim elapsed time: {format_time(sim_time)})\"\n    )\n\n    # Iteration loop\n    while True:\n        i += 1\n        t += h\n        day = t * UNIT_TIME / (3600 * 24)\n\n        eph_on_day = get_ephemerides_on_day(ephemerides, day)\n        eph_coords = get_coordinates_on_day_rad(eph_on_day)\n\n        # Save ephemerides coords on day into array\n        R_ks, theta_ks, phi_ks = eph_coords\n        sun = [R_ks[0], theta_ks[0], phi_ks[0]]\n        earth = [R_ks[1], theta_ks[1], phi_ks[1]]\n        mars = [R_ks[2], theta_ks[2], phi_ks[2]]\n        eph_body_coords.append([sun, earth, mars])\n\n        dist_sun = get_distance_spherical(Q, sun) * UNIT_LENGTH\n        dist_earth = get_distance_spherical(Q, earth) * UNIT_LENGTH\n        dist_mars = get_distance_spherical(Q, mars) * UNIT_LENGTH\n        body_distances.append([dist_sun, dist_earth, dist_mars])\n\n        if dist_earth < EARTH_RADIUS:\n            logging.info(\n                f\"STOP: Collision with earth {dist_earth:.6f} \"\n                f\"({format_time(max_duration, time_unit='years')}) \"\n                f\"reached at t = {t:.6f} ({format_time(t, time_unit='years')})\"\n                f\" at iteration: {i}/{max_iter} ~ {i/max_iter*100:.3f} %\"\n            )\n            break\n\n        Q, B = euler_step_symplectic(h, Q, B, eph_coords)\n        # Q, B = verlet_step_symplectic(h, Q, B, eph_coords)\n\n        ts.append(t)\n        days.append(day)\n        Qs.append(Q)\n        Bs.append(B)\n        q_p_list.append((Q, B))\n\n        # Log status every 1000 iterations.\n        if i % 1000 == 0:\n            t1 = time.time()\n            sim_time = t1 - t_start\n\n            # if i > 10 ^ 4:\n            #     h = 60 / UNIT_TIME\n            #     logging.info(f\"At iteration {i}, h now set to {h}\")\n\n            # if i > 10 ^ 5:\n            #     h = 3600 / UNIT_TIME\n            #     logging.info(f\"At iteration {i}, h now set to {h}\")\n\n            # if i > 10 ^ 6:\n            #     h = 3600 * 12 / UNIT_TIME\n            #     logging.info(f\"At iteration {i}, h now set to {h}\")\n\n            logging.info(\n                f\"Iteration {str(i).rjust(len(str(max_iter)))} / {max_iter}\"\n                f\", in-sim time {format_time(t, time_unit='years')} / \"\n                f\"{format_time(max_duration, time_unit='years')}\"\n                f\" (out-of-sim elapsed time: {format_time(sim_time)})\"\n            )\n\n        # Stop simulation if max duration reached\n        if (t - t0) >= max_duration:\n            logging.info(\n                f\"STOP: Max time of {max_duration:.6f} \"\n                f\"({format_time(max_duration, time_unit='years')}) \"\n                f\"reached at t = {t:.6f} ({format_time(t, time_unit='years')})\"\n                f\" at iteration: {i}/{max_iter} ~ {i/max_iter*100:.3f} %\"\n            )\n            break\n\n        # Stop simulation if max iterations reached\n        if i >= max_iter:\n            logging.info(\n                f\"STOP: Max iter of {max_iter} reached (i={i}) \"\n                f\"at t = {format_time(t, time_unit='years')}/\"\n                f\"{format_time(max_duration, time_unit='years')} ~ \"\n                f\"{t/max_duration:.3f} %)\"\n            )\n            break\n\n        # Check for body collision\n\n    t_s = (t - t0) * UNIT_TIME  # final in-sim 
time in seconds\n    t_end = time.time()\n    T = t_end - t_start  # final out-of-sim time in seconds\n\n    # Post simulator run logging\n    logging.info(\n        f\"SIMULATOR PERFORMANCE: Sim/Real time ratio: \"\n        f\"{Decimal(t_s / T):.2E} ({(t_s / T):.2f})\"\n    )\n    logging.info(\n        f\"SIMULATOR PERFORMANCE: 1 second can simulate: \"\n        f\"{format_time(t_s / T)} (DDD:HH:MM:SS)\"\n    )\n    logging.info(\n        f\"SIMULATOR PERFORMANCE: Time to simulate 1 day: \"\n        f\"{format_time(T / t_s * 3600 * 24)} (DDD:HH:MM:SS)\"\n    )\n    logging.info(\n        f\"TIME ELAPSED: In-sim time duration: {format_time(t,time_unit='years')} \"\n        f\"(DDD:HH:MM:SS)\"\n    )\n    logging.info(\n        f\"TIME ELAPSED: Out-of-sim time duration: {format_time(T)} (DDD:HH:MM:SS)\"\n    )\n\n    return (ts, Qs, Bs, (t, i), ephemerides, eph_body_coords, body_distances)\n\n\ndef format_time(time_value, time_unit=\"seconds\"):\n    \"\"\"Format time from a single unit (by default seconds) to a DDD:HH:MM:SS string\n\n    Arguments:\n        time_value {[float]} -- [Time value in some unit]\n\n    Keyword Arguments:\n        time_unit {str} -- [Time unit] (default: {\"seconds\"})\n\n    Raises:\n        ValueError -- [Unsupported input time unit]\n\n    Returns:\n        [str] -- [String of time formatted as DDD:HH:MM:SS]\n    \"\"\"\n\n    if time_unit == \"years\":\n        time_value = time_value * UNIT_TIME\n    elif time_unit == \"seconds\":\n        pass\n    else:\n        raise ValueError(\"Input time must be either 'years' or 'seconds' (default)\")\n\n    days = int(time_value // (3600 * 24))\n    time_value %= 3600 * 24\n    hours = int(time_value // 3600)\n    time_value %= 3600\n    minutes = int(time_value // 60)\n    time_value %= 60\n    seconds = time_value\n    text = f\"{days:0>3d}:{hours:0>2d}:{minutes:0>2d}:{seconds:0>5.2f}\"\n\n    return text\n\n\ndef apply_delta_v(Q, B, delta_v):\n    \"\"\"Apply an engine delta-v along velocity vector axis of delta-v\"\"\"\n\n    # Unpack velocity and speed in cartesian coordinates\n    R, theta, phi = Q\n    B_R, B_theta, B_phi = B\n\n    Rdot = get_Rdot(B_R)\n    thetadot = get_thetadot(R, B_theta)\n    phidot = get_phidot(R, theta, B_phi)\n\n    Rdot *= UNIT_VELOCITY\n    thetadot /= UNIT_TIME\n    phidot /= UNIT_TIME\n    R *= UNIT_LENGTH\n\n    v_speed_spherical = get_speed_spherical(R, theta, Rdot, thetadot, phidot)\n\n    v = get_velocity_cartesian_from_spherical([R, theta, phi], [Rdot, thetadot, phidot])\n    v_speed = get_speed_cartesian(*v)\n\n    logging.debug(\n        f\"Speed before burn (from spherical coords): {v_speed_spherical} km/s\"\n    )\n    logging.info(f\"Speed before burn: {v_speed} km/s\")\n\n    # Apply burn\n    v_unit = v / np.linalg.norm(v)\n\n    v2 = v + v_unit * delta_v\n\n    v_speed2 = get_speed_cartesian(*v2)\n\n    logging.info(f\"Burn delta-v: {delta_v} km/s\")\n    logging.info(f\"Speed after burn: {v_speed2} km/s\")\n\n    # Convert cartesian speed post-burn back into B\n    pos = get_position_cartesian_from_spherical(R, theta, phi)\n    v_postburn_spherical = get_velocity_spherical_from_cartesian(list(pos), v2)\n\n    Rdot2, thetadot2, phidot2 = v_postburn_spherical\n    R /= UNIT_LENGTH\n\n    B_R2 = get_B_R(Rdot2 / UNIT_VELOCITY)\n    B_theta2 = get_B_theta(R, thetadot2 * UNIT_TIME)\n    B_phi2 = get_B_phi(R, theta, phidot2 * UNIT_TIME)\n\n    B2 = [B_R2, B_theta2, B_phi2]\n\n    logging.info(f\"B before burn: {B}\")\n    logging.info(f\"B after burn: {B2}\")\n\n    return B2\n\n\n# if __name__ == \"__main__\":\n#     simulate()\n" }, { "alpha_fraction": 0.5548226237297058, "alphanum_fraction": 0.5709469318389893, "avg_line_length": 34.71727752685547, "blob_id": "b5e7e7407987e02614b495f32dd292c63222586c", "content_id": "b5a26b8d58ee3550c3e32842ac850780fa266a67", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6822, "license_type": "permissive", "max_line_length": 115, "num_lines": 191, "path": "/code/orbsim/r4b_3d/mplotting.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "from pathlib import Path\n\nimport matplotlib\n\nmatplotlib.use(\"Qt5Agg\")\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nfrom matplotlib.animation import ImageMagickFileWriter\nimport numpy as np\nfrom mpl_toolkits.mplot3d.art3d import Line3D\n\nfrom orbsim.r4b_3d import UNIT_TIME\nfrom orbsim.r4b_3d.coordinate_system import get_position_cartesian_from_spherical\nfrom orbsim.r4b_3d.ephemerides import get_ephemerides, get_ephemerides_on_day\n\n\ndef all_plots_r4b_orbitplot(qs, ts, t_final, max_year):\n fig = plt.figure()\n ax1 = fig.add_subplot(\"221\", projection=\"3d\")\n ax2 = fig.add_subplot(\"222\", projection=\"3d\")\n ax3 = fig.add_subplot(\"223\", projection=\"3d\")\n for ax in [ax1, ax2, ax3]:\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_zlabel(\"z\")\n r4b_orbit = R4bOrbit(qs, ts, t_final, max_year, ax1)\n r4b_orbit.zoom_orbit(ax2)\n r4b_orbit.r4b_orbitplot(ax3)\n # exit(0)\n # ani = animation.FuncAnimation(\n # fig, r4b_orbit.update, range(len(qs)), interval=20, blit=False\n # ) # Turn off blitting if you want to rotate the plot. Turn it on if you wanna go fast\n # plt.rcParams[\n # \"animation.convert_path\"\n # ] = \"C:\\Program Files\\ImageMagick-7.0.8-Q16\\magick.exe\" # \"/usr/local/bin/magick\"\n # writer = ImageMagickFileWriter()\n # ani.save(f\"{str(Path.home())}/animation.mp4\", writer=writer)\n\n plt.show()\n\n\nclass R4bOrbit(object):\n def __init__(self, qs, ts, t_final, max_year, ax):\n eph = get_ephemerides(max_year=max_year)\n earth = eph[\"earth\"]\n mars = eph[\"mars\"]\n # ts is in years, and is as such very small. 
we scale it to days, to fit with eph\n days_ts = [(t * UNIT_TIME) / 3600 / 24 for t in ts]\n\n qs = [get_position_cartesian_from_spherical(x, y, z) for x, y, z in qs]\n self.xs, self.ys, self.zs = np.array(\n qs\n ).T # get individual coordinate sets for plotting\n\n self.t_final = t_final\n ax.set_title(\"animation\")\n\n self.ani_ax = ax\n\n xs_earth = []\n ys_earth = []\n zs_earth = []\n xs_mars = []\n ys_mars = []\n zs_mars = []\n for t in days_ts:\n t_eph = get_ephemerides_on_day(eph, day_index=t)\n xs_earth.append(t_eph[\"earth\"][\"x\"])\n ys_earth.append(t_eph[\"earth\"][\"y\"])\n zs_earth.append(t_eph[\"earth\"][\"z\"])\n xs_mars.append(t_eph[\"mars\"][\"x\"])\n ys_mars.append(t_eph[\"mars\"][\"y\"])\n zs_mars.append(t_eph[\"mars\"][\"z\"])\n\n self.xs_earth = xs_earth\n self.ys_earth = ys_earth\n self.zs_earth = zs_earth\n\n self.xs_mars = xs_mars\n self.ys_mars = ys_mars\n self.zs_mars = zs_mars\n\n self.earth_xdata = [self.xs_earth[0]]\n self.earth_ydata = [self.ys_earth[0]]\n self.earth_zdata = [self.zs_earth[0]]\n self.earth_line = Line3D(\n self.earth_xdata, self.earth_ydata, self.earth_zdata, color=\"deepskyblue\"\n )\n self.ani_ax.add_line(self.earth_line)\n\n self.mars_xdata = [self.xs_mars[0]]\n self.mars_ydata = [self.ys_mars[0]]\n self.mars_zdata = [self.zs_mars[0]]\n self.mars_line = Line3D(\n self.mars_xdata, self.mars_ydata, self.mars_zdata, color=\"orange\"\n )\n self.ani_ax.add_line(self.mars_line)\n\n self.xdata = [self.xs[0]]\n self.ydata = [self.ys[0]]\n self.zdata = [self.zs[0]]\n self.traj_line = Line3D(self.xdata, self.ydata, self.zdata, color=\"black\")\n self.ani_ax.add_line(self.traj_line)\n # -- SUN --\n ax.scatter(0, 0, 0, c=\"gold\", marker=\"o\")\n\n self.ani_ax.set_xlim(-1.5, 1.5)\n self.ani_ax.set_ylim(-1.5, 1.5)\n self.ani_ax.set_zlim(-1, 1)\n\n self.ani_terminate = False\n\n def update(self, i):\n if self.ani_terminate:\n return self.traj_line, self.earth_line, self.mars_line\n if i == len(self.xs) - 1:\n self.ani_ax.set_title(\"animation... 
DONE\")\n self.ani_ax.scatter(self.xs[-1], self.ys[-1], self.zs[-1], color=\"black\")\n self.ani_ax.scatter(\n self.xs_earth[-1],\n self.ys_earth[-1],\n self.zs_earth[-1],\n color=\"deepskyblue\",\n )\n self.ani_ax.scatter(\n self.xs_mars[-1], self.ys_mars[-1], self.zs_mars[-1], color=\"orange\"\n )\n self.ani_terminate = True\n # plt.close()\n x = self.xs[i]\n y = self.ys[i]\n z = self.zs[i]\n self.xdata.append(x)\n self.ydata.append(y)\n self.zdata.append(z)\n self.traj_line.set_data(self.xdata, self.ydata)\n self.traj_line.set_3d_properties(zs=self.zdata)\n\n self.earth_xdata.append(self.xs_earth[i])\n self.earth_ydata.append(self.ys_earth[i])\n self.earth_zdata.append(self.zs_earth[i])\n self.earth_line.set_data(self.earth_xdata, self.earth_ydata)\n self.earth_line.set_3d_properties(zs=self.earth_zdata)\n\n self.mars_xdata.append(self.xs_mars[i])\n self.mars_ydata.append(self.ys_mars[i])\n self.mars_zdata.append(self.zs_mars[i])\n self.mars_line.set_data(self.mars_xdata, self.mars_ydata)\n self.mars_line.set_3d_properties(zs=self.mars_zdata)\n return self.traj_line, self.earth_line, self.mars_line\n\n def r4b_orbitplot(self, ax):\n\n ax.set_title(\"static holistic plot\")\n\n ax.plot(self.xs, self.ys, self.zs, color=\"black\")\n\n # -- EARTH --\n ax.plot(\n self.xs_earth, self.ys_earth, self.zs_earth, color=\"deepskyblue\"\n ) # plot lines\n\n # -- MARS --\n ax.plot(self.xs_mars, self.ys_mars, self.zs_mars, color=\"orange\") # plot lines\n\n # -- SUN --\n\n ax.scatter(self.xs[-1], self.ys[-1], self.zs[-1], color=\"black\")\n ax.scatter(\n self.xs_earth[-1], self.ys_earth[-1], self.zs_earth[-1], color=\"deepskyblue\"\n )\n ax.scatter(self.xs_mars[-1], self.ys_mars[-1], self.zs_mars[-1], color=\"orange\")\n ax.scatter(0, 0, 0, c=\"gold\", marker=\"o\")\n\n def zoom_orbit(self, ax):\n \"\"\"orbit without mars, so we can see earth and spaceship trajectory as they move through space together.\"\"\"\n earth_zoomline = Line3D(\n list(self.xs_earth),\n list(self.ys_earth),\n list(self.zs_earth),\n color=\"deepskyblue\",\n )\n traj_zoomline = Line3D(self.xs, self.ys, self.zs, color=\"black\")\n ax.set_title(\"zoomed in to spaceship\")\n ax.scatter(0, 0, 0, c=\"gold\", marker=\"o\") # Sun\n ax.add_line(earth_zoomline)\n ax.add_line(traj_zoomline)\n ax.set_xlim(min(self.xs), max(self.xs))\n ax.set_ylim(min(self.ys), max(self.ys))\n ax.set_zlim(min(self.zs), max(self.zs))\n" }, { "alpha_fraction": 0.7605633735656738, "alphanum_fraction": 0.7781690359115601, "avg_line_length": 39.71428680419922, "blob_id": "d8eb81d9567b35626832fb0b6fa4323740dce000", "content_id": "b5348d4fe36de9ac577edf3c9152181b9d2be1c9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 284, "license_type": "permissive", "max_line_length": 192, "num_lines": 7, "path": "/code/README.md", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "## r3b-mooon\n\nRestricted three-body problem to the Moon.\n\nCloned codebase from BSc project \"BSc Thesis \"Low Energy Transfer Orbits - a Theoretical and Numerical Study\" at the Technical University of Denmark, Spring 2015, by Gandalf Saxe and advisor Hans Henrik Brandenborg Sørensen.\n\nTo be refactored and cleaned up." 
}, { "alpha_fraction": 0.572402834892273, "alphanum_fraction": 0.5934303402900696, "avg_line_length": 28.722972869873047, "blob_id": "b400b3e787e62cc43a0116f91c1826e080807eeb", "content_id": "5cbc1b71b78f93c32f8df97c87ded94330fb97f5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8798, "license_type": "permissive", "max_line_length": 88, "num_lines": 296, "path": "/code/marscudasim/simulators.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nRepeatedly run single integration steps for some initial conditions until some stopping\nconditions.\n\"\"\"\n\nimport logging\nimport time\nfrom decimal import Decimal\nimport numpy as np\n\nfrom orbsim.r4b_3d import UNIT_TIME\nfrom new_ephemerides import (\n get_coordinates_on_day_rad,\n get_ephemerides,\n get_ephemerides_on_day,\n)\n\n# from ctypes import cdll\nfrom ctypes import *\n\ncudasim = cdll.LoadLibrary(\"./libcudasim.so\")\n\nfrom math import pi\n\ndef simulate(\n psi,\n max_year=\"2039\",\n h=1 / UNIT_TIME,\n max_duration=1 * 3600 * 24 / UNIT_TIME,\n max_iter=int(1e6),\n):\n \"\"\"Simple simulator that will run a LEO until duration or max_iter is reached.\n\n Keyword Arguments:\n psi {tuple} -- Initial conditions: (day, Q0, B0, burn)\n max_year {string} -- Max year for ephemerides table (default: \"2020\")\n h {float} -- Initial time step size (default: 1/UNIT_LENGTH = 1 second in years)\n max_duration {int} -- Max duration of simulation (in years) (default: {1 day})\n max_iter {int} -- Max number of iterations of simulation (default: {1e6})\n (1e6 iterations corresponds to ~11 days with h = 1 s)\n\n Returns:\n [type] -- [description]\n \"\"\"\n logging.info(\"STARTING: Simple simulation.\")\n t0 = time.time()\n max_iter = int(max_iter)\n\n # Unpack psi\n days = np.array(psi[0])\n ts = days * (3600 * 24) / UNIT_TIME\n Qs = np.array(psi[1])\n Bs = np.array(psi[2])\n nPaths = Qs.shape[0]\n\n # Read ephemerides\n logging.debug(\"Getting ephemerides tables\")\n ephemerides = get_ephemerides(max_year=max_year)\n earth = np.array(ephemerides['earth'])\n mars = np.array(ephemerides['mars'])\n\n \"\"\"\n make list of all paths to integrate\n \"\"\"\n\n ts = np.asarray(ts)\n Rs = np.array(Qs[:,0])\n thetas = np.array(Qs[:,1])\n phis = np.array(Qs[:,2])\n B_Rs = np.array(Bs[:,0])\n B_thetas = np.array(Bs[:,1])\n B_phis = np.array(Bs[:,2])\n arives = np.zeros(nPaths)\n scores = np.zeros(nPaths)\n cudasim.simulate.restype = None\n cudasim.simulate.argtypes = [\n c_int,\n c_double,\n c_double,\n c_int,\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n c_int,\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n ]\n\n earth_R = earth[:,3].astype(np.float64)\n earth_theta = earth[:,4].astype(np.float64) * pi / 180\n earth_phi = earth[:,5].astype(np.float64) * pi / 180\n mars_R = mars[:,3].astype(np.float64)\n mars_theta = mars[:,4].astype(np.float64) * pi / 180\n mars_phi = mars[:,5].astype(np.float64) * pi / 180\n\n ts_ctype = ts.ctypes.data_as(POINTER(c_double))\n Rs_ctype = Rs.ctypes.data_as(POINTER(c_double))\n thetas_ctype = thetas.ctypes.data_as(POINTER(c_double))\n phis_ctype = phis.ctypes.data_as(POINTER(c_double))\n B_Rs_ctype = B_Rs.ctypes.data_as(POINTER(c_double))\n B_thetas_ctype = B_thetas.ctypes.data_as(POINTER(c_double))\n 
B_phis_ctype = B_phis.ctypes.data_as(POINTER(c_double))\n\n earth_R_ctype = earth_R.ctypes.data_as(POINTER(c_double))\n earth_theta_ctype = earth_theta.ctypes.data_as(POINTER(c_double))\n earth_phi_ctype = earth_phi.ctypes.data_as(POINTER(c_double))\n mars_R_ctype = mars_R.ctypes.data_as(POINTER(c_double))\n mars_theta_ctype = mars_theta.ctypes.data_as(POINTER(c_double))\n mars_phi_ctype = mars_phi.ctypes.data_as(POINTER(c_double))\n arive_ctype = arives.ctypes.data_as(POINTER(c_double))\n score_ctype = scores.ctypes.data_as(POINTER(c_double))\n cudasim.simulate(\n nPaths,\n h,\n max_duration,\n int(max_iter),\n ts_ctype,\n Rs_ctype,\n thetas_ctype,\n phis_ctype,\n B_Rs_ctype,\n B_thetas_ctype,\n B_phis_ctype,\n int(earth_R.size),\n earth_R_ctype,\n earth_theta_ctype,\n earth_phi_ctype,\n mars_R_ctype,\n mars_theta_ctype,\n mars_phi_ctype,\n arive_ctype,\n score_ctype,\n )\n\n return arives, scores\n\ndef simulate_single(\n psi,\n max_year=\"2039\",\n h=1 / UNIT_TIME,\n max_duration=1 * 3600 * 24 / UNIT_TIME,\n max_iter=int(1e6),\n):\n logging.info(\"STARTING: Simple simulation.\")\n t0 = time.time()\n max_iter = int(max_iter)\n\n # Unpack psi\n days = np.array(psi[0])\n ts = days * (3600 * 24) / UNIT_TIME\n Qs = np.array(psi[1])\n Bs = np.array(psi[2])\n nSteps = int(max_duration / (h - 1e-14))\n\n # Read ephemerides\n logging.debug(\"Getting ephemerides tables\")\n ephemerides = get_ephemerides(max_year=max_year)\n\n earth = np.array(ephemerides['earth'])\n mars = np.array(ephemerides['mars'])\n ts = np.asarray(ts)\n Rs = np.array(Qs[:,0])\n thetas = np.array(Qs[:,1])\n phis = np.array(Qs[:,2])\n B_Rs = np.array(Bs[:,0])\n B_thetas = np.array(Bs[:,1])\n B_phis = np.array(Bs[:,2])\n ts_out = np.zeros(nSteps)\n Qs_out = np.zeros((nSteps, 3))\n #ts_out[:] = 0\n #Qs_out[:] = 0\n #ts_out = np.repeat(0.0, nSteps)\n #Qs_out = np.repeat(0.0, nSteps*3)\n #Qs_out.shape = (nSteps, 3)\n #print(\"nSteps=\", nSteps, \"size=\", Qs_out.size)\n i_final = np.zeros(1, int)\n cudasim.simulate_cpu.restype = None\n cudasim.simulate_cpu.argtypes = [\n c_double,\n c_double,\n c_int,\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n c_int,\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_double),\n POINTER(c_int),\n ]\n\n earth_R = earth[:,3].astype(np.float64)\n earth_theta = earth[:,4].astype(np.float64) * pi / 180\n earth_phi = earth[:,5].astype(np.float64) * pi / 180\n mars_R = mars[:,3].astype(np.float64)\n mars_theta = mars[:,4].astype(np.float64) * pi / 180\n mars_phi = mars[:,5].astype(np.float64) * pi / 180\n\n ts_ctype = ts.ctypes.data_as(POINTER(c_double))\n Rs_ctype = Rs.ctypes.data_as(POINTER(c_double))\n thetas_ctype = thetas.ctypes.data_as(POINTER(c_double))\n phis_ctype = phis.ctypes.data_as(POINTER(c_double))\n B_Rs_ctype = B_Rs.ctypes.data_as(POINTER(c_double))\n B_thetas_ctype = B_thetas.ctypes.data_as(POINTER(c_double))\n B_phis_ctype = B_phis.ctypes.data_as(POINTER(c_double))\n\n earth_R_ctype = earth_R.ctypes.data_as(POINTER(c_double))\n earth_theta_ctype = earth_theta.ctypes.data_as(POINTER(c_double))\n earth_phi_ctype = earth_phi.ctypes.data_as(POINTER(c_double))\n mars_R_ctype = mars_R.ctypes.data_as(POINTER(c_double))\n mars_theta_ctype = mars_theta.ctypes.data_as(POINTER(c_double))\n mars_phi_ctype = mars_phi.ctypes.data_as(POINTER(c_double))\n ts_out_ctype = 
ts_out.ctypes.data_as(POINTER(c_double))\n Qs_out_ctype = Qs_out.ctypes.data_as(POINTER(c_double))\n i_final_ctype = i_final.ctypes.data_as(POINTER(c_int))\n cudasim.simulate_cpu(\n h,\n max_duration,\n int(max_iter),\n ts_ctype,\n Rs_ctype,\n thetas_ctype,\n phis_ctype,\n B_Rs_ctype,\n B_thetas_ctype,\n B_phis_ctype,\n int(earth_R.size),\n earth_R_ctype,\n earth_theta_ctype,\n earth_phi_ctype,\n mars_R_ctype,\n mars_theta_ctype,\n mars_phi_ctype,\n ts_out_ctype,\n Qs_out_ctype,\n i_final_ctype,\n )\n\n return ts_out, Qs_out, i_final\n\ndef format_time(time_value, time_unit=\"seconds\"):\n \"\"\"Format time from a single unit (by default seconds) to a DDD:HH:MM:SS string\n\n Arguments:\n time {[float]} -- [Time value in some unit]\n\n Keyword Arguments:\n time_unit {str} -- [Time unit] (default: {\"seconds\"})\n\n Raises:\n ValueError -- [Unsupported input time unit]\n\n Returns:\n [str] -- [String of time formatted as DDD:HH:MM:SS]\n \"\"\"\n\n if time_unit == \"years\":\n time_value = time_value * UNIT_TIME\n elif time_unit == \"seconds\":\n pass\n else:\n raise ValueError(\"Input time must be either 'years' or 'seconds' (default)\")\n\n days = int(time_value // (3600 * 24))\n time_value %= 3600 * 24\n hours = int(time_value // 3600)\n time_value %= 3600\n minutes = int(time_value // 60)\n time_value %= 60\n seconds = time_value\n text = f\"{days:0>3d}:{hours:0>2d}:{minutes:0>2d}:{seconds:0>5.2f}\"\n\n return text\n\n\n# if __name__ == \"__main__\":\n# simulate()\n" }, { "alpha_fraction": 0.6889401078224182, "alphanum_fraction": 0.6889401078224182, "avg_line_length": 30, "blob_id": "37eb98140f04d857e44fd140b67df3db155edb28", "content_id": "aad7bc28ff10da3e5fa9e6afa63e5d4572c3e1f3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 434, "license_type": "permissive", "max_line_length": 124, "num_lines": 14, "path": "/report/makefile", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "XELATEX=xelatex -file-line-error -interaction=nonstopmode\n\n.PHONY: all\nall: thesis-letomes.tex\n\tlatexmk -pdf -pdflatex=\"$(XELATEX)\" -use-make thesis-letomes.tex\n\n.PHONY: auto\nauto: thesis-letomes.tex\n\tlatexmk -pdf -pdflatex=\"$(XELATEX)\" -use-make -pvc thesis-letomes.tex\n\n.PHONY: clean\nclean:\n\trm -f thesis-letomes.pdf *.aux *.bbl *bcf *.blg *.log *.out *.tdo *.toc *.xdv *.synctex.gz *.fdb_latexmk *.fls *.run.xml; \\\n\trm -f **/*.aux\n" }, { "alpha_fraction": 0.5340206027030945, "alphanum_fraction": 0.5593814253807068, "avg_line_length": 36.02289962768555, "blob_id": "0b1e9ca3d46e5749ca0b64b76269f1b408a5d3d8", "content_id": "bbc10b165ae1820c88156f740aeefa48f7e94c1c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4850, "license_type": "permissive", "max_line_length": 100, "num_lines": 131, "path": "/code/orbsim/r3b_2d/trajplot.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "from math import cos, pi, sin, sqrt\n\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\n\nimport numpy as np\n\nfrom orbsim.r3b_2d import EARTH_POSITION_X, LUNAR_POSITION_X\nfrom orbsim.r3b_2d.simulators import run_sim\n\n\n\nclass TrajPlot(object):\n def __init__(self, completed_path, ax, inertial_mode=True):\n self.score, _, self.path = completed_path # [Dv,success?,[x,y,px,py,h]]\n xs, ys, _, _, hs, ts = np.array(self.path).T\n self.ani_ax = ax\n self.idxs = get_idxs(hs)\n self.inertial_mode = 
inertial_mode\n\n if self.inertial_mode:\n self.xs_traj = xs\n self.ys_traj = ys\n else:\n self.xs_traj = xs * np.cos(ts) - ys * np.sin(ts)\n self.ys_traj = xs * np.sin(ts) + ys * np.cos(ts)\n\n self.xs_earth = EARTH_POSITION_X * np.cos(ts)\n self.ys_earth = EARTH_POSITION_X * np.sin(ts)\n\n self.xs_moon = LUNAR_POSITION_X * np.cos(ts)\n self.ys_moon = LUNAR_POSITION_X * np.sin(ts)\n\n self.earth_xdata = [self.xs_earth[0]]\n self.earth_ydata = [self.ys_earth[0]]\n self.earth_line, = self.ani_ax.plot(\n self.earth_xdata, self.earth_ydata, color=\"red\"\n )\n\n if self.inertial_mode:\n circle_x = [LUNAR_POSITION_X * cos(x / 100.0 * 2 * pi) for x in range(0, 101)]\n circle_y = [LUNAR_POSITION_X * sin(x / 100.0 * 2 * pi) for x in range(0, 101)]\n self.ani_ax.plot(circle_x,circle_y, color=\"grey\", alpha = 0.3)\n self.ani_ax.scatter([LUNAR_POSITION_X],[0], color=\"grey\", s=6)\n self.moon_line = None\n else:\n self.moon_xdata = [self.xs_moon[0]]\n self.moon_ydata = [self.ys_moon[0]]\n self.moon_line, = self.ani_ax.plot(\n self.moon_xdata, self.moon_ydata, color=\"grey\", alpha=0.3\n )\n\n self.traj_xdata = [self.xs_traj[0]]\n self.traj_ydata = [self.ys_traj[0]]\n self.traj_line, = self.ani_ax.plot(\n self.traj_xdata, self.traj_ydata, color=\"black\"\n )\n self.ani_ax.set_xlim((-1.01, 1.01))\n self.ani_ax.set_ylim((-1.01, 1.01))\n self.ani_ax.set_aspect(1)\n plt.axis(\"off\")\n\n self.ani_terminate = False\n self.restart_moon = False\n\n def update(self, i):\n if self.ani_terminate:\n if self.inertial_mode:\n return (self.traj_line,self.earth_line,)\n else:\n return (\n self.traj_line,\n self.earth_line,\n self.moon_line,\n ) # only important when using plt.show\n if i == len(self.idxs) - 1:\n self.ani_ax.scatter(self.xs_traj[-1], self.ys_traj[-1], color=\"black\")\n self.ani_ax.scatter(\n self.xs_earth[-1], self.ys_earth[-1], color=\"deepskyblue\"\n )\n self.ani_ax.scatter(self.xs_moon[-1], self.ys_moon[-1], color=\"grey\")\n self.ani_terminate = True\n x = self.xs_traj[self.idxs[i]]\n y = self.ys_traj[self.idxs[i]]\n self.traj_xdata.append(x)\n self.traj_ydata.append(y)\n self.traj_line.set_data(self.traj_xdata, self.traj_ydata)\n\n self.earth_xdata.append(self.xs_earth[self.idxs[i]])\n self.earth_ydata.append(self.ys_earth[self.idxs[i]])\n self.earth_line.set_data(self.earth_xdata, self.earth_ydata)\n if not self.inertial_mode:\n if self.ys_moon[self.idxs[i]] < 0 and self.ys_moon[self.idxs[i+1]] > 0:\n self.moon_xdata = []\n self.moon_ydata = []\n self.moon_xdata.append(self.xs_moon[self.idxs[i]])\n self.moon_ydata.append(self.ys_moon[self.idxs[i]])\n self.moon_line.set_data(self.moon_xdata, self.moon_ydata)\n return self.traj_line, self.earth_line, self.moon_line\n return self.traj_line, self.earth_line\n\n\ndef get_idxs(hs):\n idxs = []\n tally = 0\n for i in range(\n len(hs)\n ): # each time step h, check whether the little tally has reached our threshold.\n h = hs[i] # if it has, take that index as a time step\n tally += h\n if tally >= 3.5e-3:\n idxs.append(i)\n tally = 0\n return idxs\n\n\nif __name__ == \"__main__\":\n fig = plt.figure()\n ax = fig.gca()\n cpath = run_sim(\n [3.794183030145708, 0.023901845288554, 3.090703702702703], duration=200\n )\n trajp = TrajPlot(cpath, ax)\n ani = animation.FuncAnimation(fig, trajp.update, range(len(trajp.idxs)), interval=0.1,blit=True)\n # plt.rcParams[\n # \"animation.convert_path\"\n # ] = \"C:\\Program Files\\ImageMagick-7.0.8-Q16\\magick.exe\" # \"/usr/local/bin/magick\"\n # writer = ImageMagickFileWriter()\n # 
ani.save(f\"{str(Path.home())}/animation.mp4\", writer=\"ffmpeg\", fps=90)\n plt.show()\n" }, { "alpha_fraction": 0.45866432785987854, "alphanum_fraction": 0.4800727069377899, "avg_line_length": 26.40590476989746, "blob_id": "e5ac5649825a25ec2e4c23ca262eb4137488ef77", "content_id": "8bdb5ff313718ae5663d43bda74bc53f5ea8a62b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14854, "license_type": "permissive", "max_line_length": 397, "num_lines": 542, "path": "/code/r3b_bsc/reduced3body.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nReduced 3-Body Problem Solver Module\n====================================\nA collection of various numerical solvers for the reduced 3-body problem consisting of two larger masses (Earth, Moon) and one smaller moving in their gravitational field (a satellite). The solution assumes Earth-Moon center of mass as origin and a cartesian x-y coordinate system rotating with the lines connecting the Earth and Moon (non-inertial frame accounted for in the equations of motion).\n\nFunctions:\n\nWe assume **TODO FILL OUT HERE!\n\n\"\"\"\n\nimport time\nfrom math import pi, sqrt\n\nimport numpy as np\n\nfrom orbsim import DAY\nfrom orbsim.r3b_2d import (\n LLO_RADIUS,\n LLO_VELOCITY,\n UNIT_LENGTH,\n UNIT_TIME,\n UNIT_VELOCITY,\n)\n\nfrom .search import print_search_results, search, search_mt\nfrom .symplectic import symplectic\n\n# from orbsim.constants import *\n\n\n# ** pos, ang, burn are not used for anything beside printing\ndef trajectory(n, duration, pos, ang, burn, x0, y0, p0_x, p0_y):\n \"\"\"Integrate trajectory for the reduced 3-body problem.\n\n Args:\n n (int): Positions stored.\n duration (float): Time duration of simulation.\n x0 (float): Initial x-coordinate\n y0 (float): Initial y-coordinate\n p0_x (float): Initial generalized x-momentum\n p0_y (float): Initial generalized y-momentum\n\n Returns:\n Tuple of time-, x-, y-, p_x- and p_y lists.\n \n \"\"\"\n print(\"# Running trajectory.\")\n\n # Initialize arrays\n ts = np.linspace(0, duration, n)\n xs = np.zeros(n)\n ys = np.zeros(n)\n p_xs = np.zeros(n)\n p_ys = np.zeros(n)\n step_errors = np.zeros(n)\n h_list = np.zeros(n)\n info = np.zeros(2)\n\n # Find orbits\n runtime = time.time()\n status = symplectic(\n n, duration, x0, y0, p0_x, p0_y, xs, ys, p_xs, p_ys, step_errors, h_list, info\n )\n runtime = time.time() - runtime\n\n # Display result\n print_search_results(status, pos, ang, burn, x0, y0, p0_x, p0_y, info[0], info[1])\n print(\"# Runtime = %3.2fs\" % (runtime))\n return ts, xs, ys, p_xs, p_ys, step_errors, h_list\n\n\ndef hohmann(threads, n):\n \"\"\"Finding Hohmann trajectory for the reduced 3-body problem.\n\n Args:\n n (int): Positions stored.\n\n Returns:\n Tuple of time-, x-, y-, p_x- and p_y lists.\n \n \"\"\"\n\n print(\"# Running Hohmann.\")\n\n # Hohmann trajectory < 6 days\n duration = 6 / UNIT_TIME\n best_total_dv = 1e9\n positions = 100\n angles = 1\n burns = 200\n pos = -3 * pi / 4\n ang = 0\n burn = 3.11 / UNIT_VELOCITY # Forward Hohmann\n # burn_low = -3.14/unit_velocity # Reverse Hohmann\n\n # Super fast Hohmann trajectory < 1 days\n # duration = 3/unit_time\n # best_total_dv = 1e9\n # positions = 10\n # angles = 10\n # burns = 200\n # pos = -3*pi/4\n # ang = 0\n # burn = 3.7/unit_velocity # Forward Hohmann\n\n pos_range = pi / 4\n ang_range = pi / 8\n burn_range = 0.1 / UNIT_VELOCITY\n\n # Start search\n searches = 0\n max_searches = 5\n while searches < max_searches:\n runtime = 
time.time()\n searches += 1\n print(\"############## Search %i ###############\" % (searches))\n print(\"# pos = %f\" % (pos))\n print(\"# ang = %f\" % (ang))\n print(\"# burn = %f\" % (burn))\n pos_low = pos - pos_range\n pos_high = pos + pos_range\n ang_low = ang - ang_range\n ang_high = ang + ang_range\n burn_low = burn - burn_range\n burn_high = burn + burn_range\n stat, pos, ang, burn, x0, y0, p0_x, p0_y, dv, toa = search_mt(\n threads,\n 1,\n duration,\n positions,\n angles,\n burns,\n pos_low,\n pos_high,\n ang_low,\n ang_high,\n burn_low,\n burn_high,\n )\n\n if stat < 0:\n total_dv = abs(burn) + dv\n if best_total_dv > total_dv:\n best_total_dv = total_dv\n best_stat = stat\n best_pos = pos\n best_ang = ang\n best_burn = burn\n best_x0 = x0\n best_y0 = y0\n best_p0_x = p0_x\n best_p0_y = p0_y\n best_dv = dv\n best_toa = toa\n else:\n break\n\n pos_range *= 0.1\n ang_range *= 0.1\n burn_range *= 0.1\n\n runtime = time.time() - runtime\n print(\"# Search runtime = %3.2fs\" % (runtime))\n\n # Print best result\n print(\"################ Best ################\")\n print(\"# Best dV(total) = %f km/s\" % (best_total_dv * UNIT_VELOCITY))\n print_search_results(\n best_stat,\n best_pos,\n best_ang,\n best_burn,\n best_x0,\n best_y0,\n best_p0_x,\n best_p0_y,\n best_dv,\n best_toa,\n )\n\n # Initialize arrays\n ts = np.linspace(0, duration, n)\n xs = np.zeros(n)\n ys = np.zeros(n)\n p_xs = np.zeros(n)\n p_ys = np.zeros(n)\n step_errors = np.zeros(n)\n h_list = np.zeros(n)\n info = np.zeros(2)\n\n # Do trajectory\n duration = 10 / UNIT_TIME\n status = symplectic(\n n, duration, x0, y0, p0_x, p0_y, xs, ys, p_xs, p_ys, step_errors, h_list, info\n )\n\n return ts, xs, ys, p_xs, p_ys, step_errors, h_list\n\n\ndef low_energy(threads, n):\n \"\"\"Finding low energy transfer trajectory for the reduced 3-body problem.\n\n Args:\n n (int): Positions stored.\n\n Returns:\n Tuple of time-, x-, y-, p_x- and p_y lists.\n \n \"\"\"\n\n print(\"# Running low_energy.\")\n\n # Low-energy trajectory < 200 days\n duration = 200 / UNIT_TIME\n best_total_dv = 1e9\n positions = 100\n angles = 1\n burns = 200\n pos = -3 * pi / 4\n ang = 0\n burn = 3.12 / UNIT_VELOCITY\n pos_range = pi\n ang_range = 0\n burn_range = 0.01 / UNIT_VELOCITY\n\n # Start search\n searches = 0\n max_searches = 1\n while searches < max_searches:\n runtime = time.time()\n searches += 1\n print(\"############## Search %i ###############\" % (searches))\n print(\"# pos = %f\" % (pos))\n print(\"# ang = %f\" % (ang))\n print(\"# burn = %f\" % (burn))\n pos_low = pos - pos_range\n pos_high = pos + pos_range\n ang_low = ang - ang_range\n ang_high = ang + ang_range\n burn_low = burn - burn_range\n burn_high = burn + burn_range\n stat, pos, ang, burn, x0, y0, p0_x, p0_y, dv, toa = search_mt(\n threads,\n 1,\n duration,\n positions,\n angles,\n burns,\n pos_low,\n pos_high,\n ang_low,\n ang_high,\n burn_low,\n burn_high,\n )\n\n if stat < 0:\n total_dv = abs(burn) + dv\n if best_total_dv > total_dv:\n best_total_dv = total_dv\n best_stat = stat\n best_pos = pos\n best_ang = ang\n best_burn = burn\n best_x0 = x0\n best_y0 = y0\n best_p0_x = p0_x\n best_p0_y = p0_y\n best_dv = dv\n best_toa = toa\n else:\n break\n\n pos_range *= 0.1\n ang_range *= 0.1\n burn_range *= 0.1\n\n runtime = time.time() - runtime\n print(\"# Search runtime = %3.2fs\" % (runtime))\n\n # Initialize arrays\n ts = np.linspace(0, duration, n)\n xs = np.zeros(n)\n ys = np.zeros(n)\n p_xs = np.zeros(n)\n p_ys = np.zeros(n)\n step_errors = np.zeros(n)\n h_list = np.zeros(n)\n info 
= np.zeros(2)\n\n # Do trajectory\n duration = toa + (2.0 * pi * LLO_RADIUS / LLO_VELOCITY) / (UNIT_TIME * DAY)\n status = symplectic(\n n, duration, x0, y0, p0_x, p0_y, xs, ys, p_xs, p_ys, step_errors, h_list, info\n )\n exit()\n return ts, xs, ys, p_xs, p_ys, step_errors, h_list\n\n\ndef low_energy_parts8(threads, n):\n \"\"\"Finding low energy transfer trajectory for the reduced 3-body problem.\n\n Args:\n n (int): Positions stored.\n\n Returns:\n Tuple of time-, x-, y-, p_x- and p_y lists.\n \n \"\"\"\n\n print(\"# Running low_energy_parts8.\")\n\n # Low-energy-short trajectory < 47 days\n duration = 200 / UNIT_TIME\n best_total_dv = 1e9\n best_toa = 0\n positions = 55\n angles = 1\n burns = 55\n\n # Divide circular earth orbit into 8 parts\n for i in range(0, 8):\n pos = i * pi / 4\n ang = 0\n # burn = 3.12/unit_velocity # moon\n burn = 3.09 / UNIT_VELOCITY # L1\n pos_range = 2 * pi / 16\n ang_range = pi / 2\n burn_range = 0.1 / UNIT_VELOCITY\n\n # Start search\n searches = 0\n max_searches = 3\n while searches < max_searches:\n runtime = time.time()\n searches += 1\n print(\"############## Search %i ###############\" % (searches))\n print(\"# pos = %f\" % (pos))\n print(\"# ang = %f\" % (ang))\n print(\"# burn = %f\" % (burn))\n pos_low = pos - pos_range\n pos_high = pos + pos_range\n ang_low = ang - ang_range\n ang_high = ang + ang_range\n burn_low = burn - burn_range\n burn_high = burn + burn_range\n stat, pos, ang, burn, x0, y0, p0_x, p0_y, dv, toa = search_mt(\n threads,\n 1,\n duration,\n positions,\n angles,\n burns,\n pos_low,\n pos_high,\n ang_low,\n ang_high,\n burn_low,\n burn_high,\n )\n\n if stat < 0:\n total_dv = abs(burn) + dv\n if best_total_dv > total_dv:\n best_total_dv = total_dv\n best_stat = stat\n best_pos = pos\n best_ang = ang\n best_burn = burn\n best_x0 = x0\n best_y0 = y0\n best_p0_x = p0_x\n best_p0_y = p0_y\n best_dv = dv\n best_toa = toa\n else:\n break\n\n pos_range *= 0.1\n ang_range *= 0.1\n burn_range *= 0.1\n\n runtime = time.time() - runtime\n print(\"# Search runtime = %3.2fs\" % (runtime))\n\n # Print best result\n if best_total_dv < 1e9:\n print(\"################ Best ################\")\n print(\"# Best dV(total) = %f km/s\" % (best_total_dv * UNIT_VELOCITY))\n print_search_results(\n best_stat,\n best_pos,\n best_ang,\n best_burn,\n best_x0,\n best_y0,\n best_p0_x,\n best_p0_y,\n best_dv,\n best_toa,\n )\n\n # Initialize arrays\n ts = np.linspace(0, duration, n)\n xs = np.zeros(n)\n ys = np.zeros(n)\n p_xs = np.zeros(n)\n p_ys = np.zeros(n)\n step_errors = np.zeros(n)\n h_list = np.zeros(n)\n info = np.zeros(2)\n\n # Do trajectory\n # duration = toa+(2.0*pi*llo_radius/llo_velocity)/(unit_time*day)\n status = symplectic(\n n, duration, x0, y0, p0_x, p0_y, xs, ys, p_xs, p_ys, step_errors, h_list, info\n )\n # exit()\n return ts, xs, ys, p_xs, p_ys, step_errors, h_list\n\n\ndef refine(threads, n, duration, pos, ang, burn, x0, y0, p0_x, p0_y):\n \"\"\"Integrate trajectory for the reduced 3-body problem.\n\n Args:\n n (int): Positions stored.\n duration (float): Time duration of simulation.\n x0 (float): Initial x-coordinate\n y0 (float): Initial y-coordinate\n p0_x (float): Initial generalized x-momentum\n p0_y (float): Initial generalized y-momentum\n\n Returns:\n Tuple of time-, x-, y-, p_x- and p_y lists.\n \n \"\"\"\n print(\"# Running refine.\")\n\n # Low-energy-long trajectory < 200 days\n # duration = 200/unit_time\n # Low-energy-short trajectory < 47 days\n # duration = 47/unit_time\n best_total_dv = 1e9\n best_toa = 0\n positions = 15\n 
angles = 15\n burns = 15\n\n # Divide circular earth orbit into 8 parts\n pos_range = 2 * pi / 16 * 0.1\n ang_range = pi / 100 * 0.1\n burn_range = 0.1 / UNIT_VELOCITY * 0.1\n\n # Start search\n searches = 0\n max_searches = 10\n while searches < max_searches:\n runtime = time.time()\n searches += 1\n print(\"############## Search %i ###############\" % (searches))\n print(\"# pos = %f\" % (pos))\n print(\"# ang = %f\" % (ang))\n print(\"# burn = %f\" % (burn))\n pos_low = pos - pos_range\n pos_high = pos + pos_range\n ang_low = ang - ang_range\n ang_high = ang + ang_range\n burn_low = burn - burn_range\n burn_high = burn + burn_range\n stat, pos, ang, burn, x0, y0, p0_x, p0_y, dv, toa = search_mt(\n threads,\n 1,\n duration,\n positions,\n angles,\n burns,\n pos_low,\n pos_high,\n ang_low,\n ang_high,\n burn_low,\n burn_high,\n )\n\n if stat < 0:\n total_dv = abs(burn) + dv\n if best_total_dv > total_dv:\n best_total_dv = total_dv\n best_stat = stat\n best_pos = pos\n best_ang = ang\n best_burn = burn\n best_x0 = x0\n best_y0 = y0\n best_p0_x = p0_x\n best_p0_y = p0_y\n best_dv = dv\n best_toa = toa\n else:\n break\n\n pos_range *= 0.1\n ang_range *= 0.1\n burn_range *= 0.1\n\n runtime = time.time() - runtime\n print(\"# Search runtime = %3.2fs\" % (runtime))\n\n # Print best result\n print(\"################ Best ################\")\n print(\"# Best dV(total) = %f km/s\" % (best_total_dv * UNIT_VELOCITY))\n print_search_results(\n best_stat,\n best_pos,\n best_ang,\n best_burn,\n best_x0,\n best_y0,\n best_p0_x,\n best_p0_y,\n best_dv,\n best_toa,\n )\n\n # Initialize arrays\n ts = np.linspace(0, duration, n)\n xs = np.zeros(n)\n ys = np.zeros(n)\n p_xs = np.zeros(n)\n p_ys = np.zeros(n)\n step_errors = np.zeros(n)\n h_list = np.zeros(n)\n info = np.zeros(2)\n\n # Do trajectory\n duration = toa + (2.0 * pi * LLO_RADIUS / LLO_VELOCITY) / (UNIT_TIME * DAY)\n status = symplectic(\n n, duration, x0, y0, p0_x, p0_y, xs, ys, p_xs, p_ys, step_errors, h_list, info\n )\n # exit()\n return ts, xs, ys, p_xs, p_ys, step_errors, h_list\n" }, { "alpha_fraction": 0.5439330339431763, "alphanum_fraction": 0.560669481754303, "avg_line_length": 14.933333396911621, "blob_id": "5e258a1e9657365d894458cd22a216ed2fd947fc", "content_id": "e640972a6726e1036d8543fa0e2bf23a1fec7321", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 239, "license_type": "permissive", "max_line_length": 31, "num_lines": 15, "path": "/code/marscudasim/simulate.h", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include <cuda_runtime.h>\n\ntypedef struct {\n double t;\n double Q0[3];\n double B0[3];\n double burn;\n} psitype;\n\n__device__\nvoid simulate(psitype* psi,\n double* score,\n bool* success);\n" }, { "alpha_fraction": 0.41050809621810913, "alphanum_fraction": 0.41050809621810913, "avg_line_length": 42.29999923706055, "blob_id": "274a33c444ffec187dc6dc3890c05e316f1f147c", "content_id": "43c295b71548a89c1840dd0b66367fa2d4876039", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1732, "license_type": "permissive", "max_line_length": 77, "num_lines": 40, "path": "/code/marscudasim/euler_step.h", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"equations_of_motion.h\"\n\n__host__ __device__\ninline void euler_step(double h,\n double R, double theta, double phi,\n double B_R, double B_theta, double B_phi,\n double R_sun, double 
theta_sun, double phi_sun,\n double R_earth, double theta_earth, double phi_earth,\n double R_mars, double theta_mars, double phi_mars,\n double* R_, double* theta_, double* phi_,\n double* B_R_, double* B_theta_, double* B_phi_)\n{\n // Update q\n R = R + h * get_Rdot(B_R);\n theta = theta + h * get_thetadot(R, B_theta);\n phi = phi + h * get_phidot(R, theta, B_phi);\n *R_ = R;\n *theta_ = theta;\n *phi_ = phi;\n\n // Update B_R\n *B_R_ = B_R + h * get_Bdot_R(R, theta, phi,\n B_theta, B_phi,\n R_sun, theta_sun, phi_sun,\n R_earth, theta_earth, phi_earth,\n R_mars, theta_mars, phi_mars);\n // Update B_theta\n *B_theta_ = B_theta + h * get_Bdot_theta(R, theta, phi,\n B_phi,\n R_sun, theta_sun, phi_sun,\n R_earth, theta_earth, phi_earth,\n R_mars, theta_mars, phi_mars);\n // Update B_phi\n *B_phi_ = B_phi + h * get_Bdot_phi(R, theta, phi,\n R_sun, theta_sun, phi_sun,\n R_earth, theta_earth, phi_earth,\n R_mars, theta_mars, phi_mars);\n}\n" }, { "alpha_fraction": 0.5569620132446289, "alphanum_fraction": 0.5845504999160767, "avg_line_length": 31.09375, "blob_id": "1229267a9bd32d54c14dec0b8e61678abd70bd46", "content_id": "a7a6cee3e49c76942490dffc96ca9bcaa5fecc25", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3081, "license_type": "permissive", "max_line_length": 88, "num_lines": 96, "path": "/code/pyscripts/animation_example.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom matplotlib.lines import Line2D\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\nfrom orbsim.r3b_2d.simulators import run_sim\nfrom orbsim.plotting import orbital_circle, get_idxs\nfrom orbsim.planets import celestials\nfrom orbsim.r3b_2d import *\n\nimport msvcrt as m\n\n\nclass Orbit(object):\n def __init__(self, path, ax, interval=32, inertial=False):\n # score, _, path = completed_path # [Dv,success?,[x,y,px,py,h]]\n xs, ys, _, _, hs, ts = np.array(path).T\n self.inertial = inertial\n Xs = xs * np.cos(ts) - ys * np.sin(ts)\n Ys = xs * np.sin(ts) + ys * np.cos(ts)\n\n Xs_moon = LUNAR_POSITION_X * np.cos(ts)\n Ys_moon = LUNAR_POSITION_X * np.sin(ts)\n\n Xs_earth = EARTH_POSITION_X * np.cos(ts)\n Ys_earth = EARTH_POSITION_X * np.sin(ts)\n\n circle_x, circle_y = orbital_circle(celestials.MOON)\n ax.plot(circle_x, circle_y, color=\"grey\", linewidth=0.3, alpha=0.6)\n ax.set_aspect(1)\n\n idxs = get_idxs(hs)\n\n if inertial:\n self.xs = Xs\n self.ys = Ys\n else:\n self.xs, self.ys = np.array([[xs[idx], ys[idx]] for idx in idxs])[\n ::interval\n ].T\n self.Xs_moon, self.Ys_moon = np.array(\n [[Xs_moon[idx], Ys_moon[idx]] for idx in idxs]\n )[::interval].T\n self.Xs_earth, self.Ys_earth = np.array(\n [[Xs_earth[idx], Ys_earth[idx]] for idx in idxs]\n )[::interval].T\n\n self.ax = ax\n self.xdata = [xs[0]]\n self.ydata = [ys[0]]\n self.line = Line2D(self.xdata, self.ydata, color=\"red\")\n self.ax.add_line(self.line)\n\n self.lunxdata = [LUNAR_POSITION_X]\n self.lunydata = [0]\n self.moonline = Line2D(self.lunxdata, self.lunydata, color=\"grey\")\n self.ax.add_line(self.moonline)\n\n self.earthxdata = [EARTH_POSITION_X]\n self.earthydata = [0]\n self.earthline = Line2D(self.earthxdata, self.earthydata, color=\"blue\")\n self.ax.add_line(self.earthline)\n\n self.ax.set_ylim(-1.1, 1.1)\n self.ax.set_xlim(-1.1, 1.1)\n\n def update(self, i):\n if i == len(self.xs)-1:\n m.getch()\n exit()\n x = self.xs[i]\n y = self.ys[i]\n print(x, y, self.Xs_moon[i] ** 2 + self.Ys_moon[i] ** 2)\n 
self.xdata.append(x)\n self.ydata.append(y)\n self.line.set_data(self.xdata, self.ydata)\n\n self.lunxdata.append(self.Xs_moon[i])\n self.lunydata.append(self.Ys_moon[i])\n self.moonline.set_data(self.lunxdata, self.lunydata)\n return self.line, self.moonline\n\n\nscore, success, path = run_sim(\n [3.794182930145708, 0.023901745288554, 3.090702702702703], duration=50, max_iter=1e7\n)\nfig, ax = plt.subplots()\ninterval = 32\norbit = Orbit(path, ax, interval=interval)\n\n# pass a generator in \"emitter\" to produce data for the update func\nani = animation.FuncAnimation(\n fig, orbit.update, range(int(len(path) / interval)), interval=5, blit=True\n)\n\nplt.show()\n" }, { "alpha_fraction": 0.5980392098426819, "alphanum_fraction": 0.6372548937797546, "avg_line_length": 19.399999618530273, "blob_id": "7812a90face47cc045e32b6bf8ed433ca6d57c38", "content_id": "4030aa722aeebcf10297bd279c56532454fb6dab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 204, "license_type": "permissive", "max_line_length": 36, "num_lines": 10, "path": "/code/.coveragerc", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "# .coveragerc to control coverage.py\n\n[run]\nomit =\n */__init__.py\n orbsim/__init__.py\n orbsim/r3b_2d/*\n orbsim/r4b_3d/simulation.py\n orbsim/r4b_3d/logging.py\n orbsim/r4b_3d/mplotting.py\n" }, { "alpha_fraction": 0.5610749125480652, "alphanum_fraction": 0.5985342264175415, "avg_line_length": 24.52083396911621, "blob_id": "1924caff4f832be07a0da36f15f8782f1199c4b8", "content_id": "89a87f3f5769df6c5edbf55a0562f92781b1a936", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1228, "license_type": "permissive", "max_line_length": 94, "num_lines": 48, "path": "/code/pyscripts/pykep_experiments.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[ ]:\nimport sys\nsys.path = [\"\"] + sys.path\n\nimport pygmo as pg\nfrom pykep import epoch, util\nfrom pykep.planet import jpl_lp\nfrom pykep.planet import spice\nfrom pykep.trajopt import mga_1dsm\n\n# In[ ]:\n\n\ndef goto_mars():\n # We define an Earth-Mars problem (single-objective)\n seq = [jpl_lp('earth'),jpl_lp('mars')]\n udp = mga_1dsm(\n seq=seq,\n t0=[epoch(18*365.25 + 1), epoch(25*365.25 + 1)],\n tof=[0.7 * 365.25, 7 * 365.25],\n vinf=[0.5, 5],\n add_vinf_dep=False,\n add_vinf_arr=True,\n multi_objective=False\n )\n\n pg.problem(udp)\n # We solve it!!\n uda = pg.sade(gen=200)\n archi = pg.archipelago(algo=uda, prob=udp, n=8, pop_size=30)\n print(\n \"Running a Self-Adaptive Differential Evolution Algorithm .... on 8 parallel islands\")\n archi.evolve(10)\n archi.wait()\n sols = archi.get_champions_f()\n idx = sols.index(min(sols))\n print(\"Done!! 
Solutions found are: \", archi.get_champions_f())\n print(f\"\\nThe best solution with Dv = {min(sols)[0]}:\\n\")\n udp.pretty(archi.get_champions_x()[idx])\n udp.plot(archi.get_champions_x()[idx],savepath=\"plot.png\")\n\n\n# In[ ]:\n\nif __name__ == \"__main__\":\n goto_mars()\n\n\n" }, { "alpha_fraction": 0.593562662601471, "alphanum_fraction": 0.6023982167243958, "avg_line_length": 28.616823196411133, "blob_id": "56a968097cb9f068c4e6f497e27bec3ef6438dfe", "content_id": "8d495a2fae32af68c538c7cf676f1fd8b15993bf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3169, "license_type": "permissive", "max_line_length": 88, "num_lines": 107, "path": "/code/orbsim/r3b_2d/__init__.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nUnless otherwise noted, all units will be in:\n- Mass: kg\n- Length: km\n- Time: days TODO: Change to seconds due to better fit with typical time step size\n\nVariable name conventions:\n- non_dim: dimensionless (nondimensionalized)\n\"\"\"\nimport json\nimport os\nfrom math import pi, sqrt\n\nfrom orbsim import (\n G,\n DAY,\n EARTH_MASS,\n EARTH_MOON_DISTANCE,\n EARTH_RADIUS,\n LUNAR_MASS,\n LUNAR_ORBITAL_DURATION,\n LUNAR_RADIUS,\n)\n\n############### SIMULATION CONSTANTS ###############\n\n# class: Planets (planets.py)\nEARTH_ALTITUDE = 160.0 # km\nLUNAR_ALTITUDE = 100.0 # km\nORBITAL_TOLERANCE = 10 # km\n\n# function: symplectic (integrators.py)\nh_DEFAULT = 1e-6 # dimless time\nh_MIN_DEFAULT = 1e-10 # dimless time\nSTEP_ERROR_TOLERANCE = 1e-9 # dimless\n\n\n############### CHARACTERISTIC UNITS ###############\n\nUNIT_LENGTH = EARTH_MOON_DISTANCE # km\nUNIT_TIME = LUNAR_ORBITAL_DURATION / (2.0 * pi) # days\nUNIT_VELOCITY = UNIT_LENGTH / (UNIT_TIME * DAY) # km/s\n\nk = LUNAR_MASS / (EARTH_MASS + LUNAR_MASS) # dimless\n\n\n############### DERIVED BOUNDARY CONDITIONS ###############\n\n# Initial orbit (Earth)\nLEO_RADIUS = EARTH_RADIUS + EARTH_ALTITUDE # km\nLEO_VELOCITY = sqrt(G * EARTH_MASS / (LEO_RADIUS)) # km/s\n\n# Target orbit (Moon)\nLLO_RADIUS = LUNAR_RADIUS + LUNAR_ALTITUDE # km\nLLO_VELOCITY = sqrt(G * LUNAR_MASS / (LLO_RADIUS)) # km/s\n\n# Initial Positions (Note that Y for both Earth and Moon is always zero in (X,Y) system)\nLUNAR_POSITION_X = 1 - k\nEARTH_POSITION_X = -k\nL1_POSITION_X = 1 - pow(k / 3, 1 / 3)\n\n\n############### NONDIMENSIONALIZATION ###############\n\n# Nondimensionalized boundary conditions\nLEO_RADIUS_NONDIM = LEO_RADIUS / UNIT_LENGTH # dimless\nLEO_VELOCITY_NONDIM = LEO_VELOCITY / UNIT_VELOCITY # dimless\n\n\ndef update_constants_json():\n \"\"\" Write constant to constants.json file in same directory\"\"\"\n\n # Write constants to text file\n constants_dict = {\n ############### SIMULATION CONSTANTS ###############\n \"EARTH_ALTITUDE\": EARTH_ALTITUDE,\n \"LUNAR_ALTITUDE\": LUNAR_ALTITUDE,\n \"ORBITAL_TOLERANCE\": ORBITAL_TOLERANCE,\n \"h_DEFAULT\": h_DEFAULT,\n \"h_MIN\": h_MIN_DEFAULT,\n \"STEP_ERROR_TOLERANCE\": STEP_ERROR_TOLERANCE,\n ############### CHARACTERISTIC UNITS ###############\n \"UNIT_LENGTH\": UNIT_LENGTH,\n \"UNIT_TIME\": UNIT_TIME,\n \"UNIT_VELOCITY\": UNIT_VELOCITY,\n \"k\": k,\n ############### DERIVED BOUNDARY CONDITIONS ###############\n \"LEO_RADIUS\": LEO_RADIUS,\n \"LEO_VELOCITY\": LEO_VELOCITY,\n \"LLO_RADIUS\": LLO_RADIUS,\n \"LLO_VELOCITY\": LLO_VELOCITY,\n \"LUNAR_POSITION_X\": LUNAR_POSITION_X,\n \"EARTH_POSITION_X\": EARTH_POSITION_X,\n \"L1_POSITION_X\": L1_POSITION_X,\n ############### 
NONDIMENSIONALIZATION ###############\n        \"LEO_RADIUS_NONDIM\": LEO_RADIUS_NONDIM,\n        \"LEO_VELOCITY_NONDIM\": LEO_VELOCITY_NONDIM,\n    }\n\n    orbsim_path = os.path.dirname(os.path.abspath(__file__))\n\n    with open(orbsim_path + \"/constants.json\", \"w\", newline=\"\\n\") as file:\n        file.write(json.dumps(constants_dict, indent=2))\n\n\nif __name__ == \"__main__\":\n    update_constants_json()\n" }, { "alpha_fraction": 0.49982765316963196, "alphanum_fraction": 0.5568769574165344, "avg_line_length": 25.01793670654297, "blob_id": "28c2effc278b9ab30bdb6fc736e6ef60296462c9", "content_id": "b8120ee7ce05ab2e1f24a1b7570920e341521b30", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5802, "license_type": "permissive", "max_line_length": 93, "num_lines": 223, "path": "/code/orbsim/r4b_3d/integrators.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nImplements symplectic integrators that integrate the H-R4B system equations.\n\n1. `analyticals.py`: sets up the equations of motion.\n\n2. `integrators.py`: discretizes the equations of motion and defines a single time step of the\n   chosen numerical algorithm.\n\n3. `simulation.py`: runs the single steps from `integrators.py` repeatedly for some initial\n   conditions and stopping conditions.\n\"\"\"\nfrom math import pi\n\nimport numpy as np\nfrom numpy import cos, sin, sqrt, tan\n\nfrom orbsim.r4b_3d import EARTH_ETA, MARS_ETA, SUN_ETA\nfrom orbsim.r4b_3d.coordinate_system import (\n    keep_phi_in_interval_npi_to_pi,\n    keep_theta_in_interval_zero_to_pi,\n)\nfrom orbsim.r4b_3d.equations_of_motion import (\n    get_Bdot_phi,\n    get_Bdot_R,\n    get_Bdot_theta,\n    get_phidot,\n    get_Rdot,\n    get_thetadot,\n)\n\neta_ks = [SUN_ETA, EARTH_ETA, MARS_ETA]\n\n# from numba import njit  # boolean, float64, jit\n\n\n# @njit\ndef euler_step_symplectic(h, Q, B, eph_coords):\n    \"\"\"Takes a single time step of the symplectic Euler algorithm\"\"\"\n    # Unpack Q, B and eph_coords\n    R, theta, phi = Q\n    B_R, B_theta, B_phi = B\n    R_ks, theta_ks, phi_ks = eph_coords\n\n    # Update q\n    R = R + h * get_Rdot(B_R)\n    theta = theta + h * get_thetadot(R, B_theta)\n    phi = phi + h * get_phidot(R, theta, B_phi)\n\n    if theta <= 0 or theta >= pi:\n        theta = keep_theta_in_interval_zero_to_pi(theta)\n    if phi <= -pi or phi > pi:\n        phi = keep_phi_in_interval_npi_to_pi(phi)\n\n    # Update B_R\n    Bdot_R = get_Bdot_R(R, theta, phi, B_theta, B_phi, R_ks, theta_ks, phi_ks)\n    B_R = B_R + h * Bdot_R\n\n    # Update B_theta\n    Bdot_theta = get_Bdot_theta(R, theta, phi, B_phi, R_ks, theta_ks, phi_ks)\n    B_theta = B_theta + h * Bdot_theta\n\n    # Update B_phi\n    Bdot_phi = get_Bdot_phi(R, theta, phi, R_ks, theta_ks, phi_ks)\n    B_phi = B_phi + h * Bdot_phi\n\n    return ((R, theta, phi), (B_R, B_theta, B_phi))\n\n\n# @njit\ndef verlet_step_symplectic(h, Q, B, eph_coords):\n    \"\"\"Takes a single time step of the symplectic Verlet algorithm\"\"\"\n    # Unpack Q, B and eph_coords\n    R0, theta0, phi0 = Q\n    B_R0, B_theta0, B_phi0 = B\n\n    hh = h / 2\n\n    # Update Qh\n    R_h = R0 + hh * B_R0\n    theta_h = theta0 + hh * B_theta0 / R_h ** 2\n    phi_h = phi0 + hh * B_phi0 / (R_h ** 2 * sin(theta_h) ** 2)\n\n    Q_h = [R_h, theta_h, phi_h]\n\n    # Update B1\n    lambda_hks = get_lambda_hks(Q_h, eph_coords)\n\n    gamma_hks = get_gamma_hks(Q_h, eph_coords)\n    summation_gamma = np.sum(gamma_hks / lambda_hks)\n    B_phi1 = B_phi0 + hh * (2 * summation_gamma)\n\n    beta_hks = get_beta_hks(Q_h, eph_coords)\n    summation_beta = np.sum(beta_hks / lambda_hks)\n    B_theta1 = B_theta0 + hh * (\n        (B_phi0 ** 2 + B_phi1 ** 2) / (R_h ** 2 * sin(theta_h) ** 2 * tan(theta_h))\n        + 2 * summation_beta\n    )\n\n    alpha_hks = get_alpha_hks(Q_h, eph_coords)\n    summation_alpha = np.sum(alpha_hks / lambda_hks)\n    B_R1 = B_R0 + hh * (\n        (B_theta0 ** 2 + B_theta1 ** 2) / (R_h ** 3)\n        + (B_phi0 ** 2 + B_phi1 ** 2) / (R_h ** 3 * sin(theta_h) ** 2)\n        + 2 * summation_alpha\n    )\n\n    # Update Q1\n    R1 = R_h + hh * B_R1\n    theta1 = theta_h + hh * (B_theta1 / R_h ** 2)\n    phi1 = phi_h + hh * (B_phi1 / (R_h ** 2 * sin(theta_h) ** 2))\n\n    Q1 = [R1, theta1, phi1]\n    B1 = [B_R1, B_theta1, B_phi1]\n\n    return Q1, B1\n\n
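\n# NOTE (added): the helpers below supply the gravitational three-body sum terms used\n# in the Verlet half-step above. For each attracting body k (Sun, Earth, Mars),\n# get_lambda_hks returns the denominator |r - r_k|^3 evaluated at the half-step\n# position Q_h, while get_alpha_hks, get_beta_hks and get_gamma_hks return the\n# eta_k-weighted numerators of the gravitational parts of Bdot_R, Bdot_theta and\n# Bdot_phi respectively; np.sum(numerators / lambda_hks) then gives each force term.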
\ndef get_lambda_hks(Q_h, eph_coords):\n    # Unpack Q_h, B and eph_coords\n    R_h, theta_h, phi_h = Q_h\n    R_ks, theta_ks, phi_ks = eph_coords\n\n    R_ks = np.array(R_ks)\n    theta_ks = np.array(theta_ks)\n    phi_ks = np.array(phi_ks)\n\n    denominators_base = (\n        R_h ** 2\n        + R_ks ** 2\n        - 2\n        * R_h\n        * R_ks\n        * (\n            cos(theta_h) * cos(theta_ks)\n            + sin(theta_h) * sin(theta_ks) * cos(phi_h - phi_ks)\n        )\n    )\n\n    denominators = denominators_base * sqrt(denominators_base)\n\n    return denominators\n\n\ndef get_gamma_hks(Q_h, eph_coords):\n    # Unpack Q_h, B and eph_coords\n    R_h, theta_h, phi_h = Q_h\n    R_ks, theta_ks, phi_ks = eph_coords\n\n    R_ks = np.array(R_ks)\n    theta_ks = np.array(theta_ks)\n    phi_ks = np.array(phi_ks)\n\n    numerators = eta_ks * (\n        -R_h * R_ks * sin(theta_h) * sin(theta_ks) * sin(phi_h - phi_ks)\n    )\n\n    return numerators\n\n\ndef get_beta_hks(Q_h, eph_coords):\n    # Unpack Q_h, B and eph_coords\n    R_h, theta_h, phi_h = Q_h\n    R_ks, theta_ks, phi_ks = eph_coords\n\n    R_ks = np.array(R_ks)\n    theta_ks = np.array(theta_ks)\n    phi_ks = np.array(phi_ks)\n\n    numerators = eta_ks * (\n        R_h\n        * R_ks\n        * (\n            -sin(theta_h) * cos(theta_ks)\n            + cos(theta_h) * sin(theta_ks) * cos(phi_h - phi_ks)\n        )\n    )\n\n    return numerators\n\n\ndef get_alpha_hks(Q_h, eph_coords):\n    # Unpack Q_h, B and eph_coords\n    R_h, theta_h, phi_h = Q_h\n    R_ks, theta_ks, phi_ks = eph_coords\n\n    R_ks = np.array(R_ks)\n    theta_ks = np.array(theta_ks)\n    phi_ks = np.array(phi_ks)\n\n    numerators = eta_ks * (\n        -R_h\n        + R_ks\n        * (\n            cos(theta_h) * cos(theta_ks)\n            + sin(theta_h) * sin(theta_ks) * cos(phi_h - phi_ks)\n        )\n    )\n\n    return numerators\n\n\n# @njit\n# def relative_error(vec1, vec2):\n#     x1, y1 = vec1\n#     x2, y2 = vec2\n#     return sqrt(((x2 - x1) ** 2 + (y2 - y1) ** 2) / (x2 ** 2 + y2 ** 2))\n\n# if __name__ == \"__main__\":\n\n#     from pprint import pprint\n\n#     test = euler_step_symplectic(\n#         3.1687536450894706e-08,\n#         [0.9833550575288669, 1.1683216354741335, 1.7605747565734895],\n#         [0.06619397691044351, 0.6131467542061076, 8.857580619176503],\n#         [\n#             [0.0, 0.983311354517, 1.45349465364],\n#             [0.7853981633974483, 1.1683216629370692, 1.3089386258001088],\n#             [0.0, 1.7605751533054472, 0.681572830178241],\n#         ],\n#     )\n\n#     pass\n" }, { "alpha_fraction": 0.5683251619338989, "alphanum_fraction": 0.5868955850601196, "avg_line_length": 24.945453643798828, "blob_id": "3259f0d53cab927af7794a003707b683e8240cc4", "content_id": "972e573ab49319689731366b7e5651cc68b28aaa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2854, "license_type": "permissive", "max_line_length": 111, "num_lines": 110, "path": "/code/orbsim/r3b_2d/analyticals.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nEquations of motion for R3B-2D system (Restricted 3-Body Problem in 2 Dimensions).\nDerived via Hamilton's equations.\n\"\"\"\n\nfrom math import sqrt\nimport numpy as np\nimport numpy.random as npr\n\nfrom numba import njit\n\nfrom orbsim.r3b_2d import k\n\n\n@njit\ndef get_pdot_x(x, y, p_y):\n    \"\"\"time derivative of generalized momentum p_x (nondimensionalized) from position and momentum vectors\"\"\"\n    denominator_1, denominator_2 = pdot_denominators(x, y)\n    pdot_x = (\n        p_y - ((1 - k) * (x + k)) / denominator_1 - k * (x - 1 + k) / denominator_2\n    )  # Note: In old version there was a sign error and \"1+k-x\" used to be \"1-k-x\"\n    return pdot_x\n\n\n@njit\ndef get_pdot_y(x, y, p_x):\n    \"\"\"time derivative of generalized momentum p_y (nondimensionalized) from position and momentum vectors\"\"\"\n    denominator_1, denominator_2 = pdot_denominators(x, y)\n    pdot_y = -p_x - (1 - k) * y / denominator_1 - k * y / denominator_2\n    return pdot_y\n\n\n@njit\ndef pdot_denominators(x, y):\n    \"\"\"denominators used in get_pdot_x and get_pdot_y\"\"\"\n    denominator_1 = ((x + k) ** 2 + y ** 2) * sqrt((x + k) ** 2 + y ** 2)\n    denominator_2 = ((x - 1 + k) ** 2 + y ** 2) * sqrt(\n        (x - 1 + k) ** 2 + y ** 2\n    )  # Note: In old version there was a sign error and \"1+k-x\" used to be \"1-k-x\"\n    return denominator_1, denominator_2\n\n\n@njit\ndef get_xdot(y, p_x):\n    \"\"\"speed in x direction, from coordinates and momenta\"\"\"\n    v_x = p_x + y\n    return v_x\n\n\n@njit\ndef get_ydot(x, p_y):\n    \"\"\"speed in y direction, from coordinates and momenta\"\"\"\n    v_y = p_y - x\n    return v_y\n\n\n@njit\ndef collapse_intervals(bounds):\n    lens = []\n    for i in range(len(bounds)):\n        lb = bounds[i][0]\n        ub = bounds[i][1]\n        lens.append(ub - lb)\n    lens = np.array(lens)\n    return lens.cumsum()\n\n\n@njit\ndef random_disjoint_intervals(bounds):\n    \"\"\"\n    input: bounds = np.array([[lowerbound0,upperbound0],[lb1,ub1],..,[lbn,ubn]]), must be disjoint, and sorted.\n    returns: single uniform random number within the union of the given intervals.\n    \"\"\"\n    lens = collapse_intervals(bounds)\n    total_len = lens[-1]\n\n    R = npr.rand() * total_len\n\n    idx = 0\n    for i in range(len(bounds)):\n        if R <= lens[i]:\n            idx = i\n            break\n    lb = bounds[idx][0]\n    ub = bounds[idx][1]\n    R_star = (ub - lb) * npr.rand() + lb\n    return R_star\n
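\n\n# Example (added sketch): draw one sample uniformly from the union [0, 1] U [2, 3].\n# An interval is first picked with probability proportional to its length, then a\n# point is drawn uniformly inside it, which is uniform over the whole union:\n#\n#     bounds = np.array([[0.0, 1.0], [2.0, 3.0]])\n#     sample = random_disjoint_intervals(bounds)  # e.g. 2.371...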
\n\ndef check_bound(v, bound):\n    \"\"\"\n    checks a single value against its bounds, and returns\n    the closest bound if it's in violation\n    \"\"\"\n    for b in bound:\n        if v >= b[0] and v <= b[1]:\n            return v\n    return min([(abs(x - v), x) for x in bound.flatten()])[1]\n\n\ndef ensure_bounds(psi, bounds):\n    \"\"\"clamp each component of a decision vector psi to its (possibly disjoint) bounds\"\"\"\n    new_psi = []\n    for v, bound in zip(psi, bounds):\n        new_psi.append(check_bound(v, bound))\n    return new_psi\n\n\nif __name__ == \"__main__\":\n    test1 = get_pdot_x(x=0, y=0, p_y=0)\n    print(test1)\n" }, { "alpha_fraction": 0.6909090876579285, "alphanum_fraction": 0.7636363506317139, "avg_line_length": 27, "blob_id": "c349df9400fc7b354a69fc7079b3005e615a89f4", "content_id": "1ec15ed9d14053fd45c19093882a8e60c950a89f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 55, "license_type": "permissive", "max_line_length": 32, "num_lines": 2, "path": "/jupyter_docker/setup.sh", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "mkdir $(pwd)/notebooks\nsudo chown 1000 $(pwd)/notebooks" }, { "alpha_fraction": 0.5562854409217834, "alphanum_fraction": 0.5949044823646545, "avg_line_length": 43.95302200317383, "blob_id": "59ee21993c01c57568bcaa3f3decea5df6270234", "content_id": "3802e0385af745ec9130a2d4003bfbc2d89dc5a4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6698, "license_type": "permissive", "max_line_length": 
153, "num_lines": 149, "path": "/code/marscudasim/equations_of_motion.h", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include <math.h>\n\n__host__ __device__\ninline double get_Rdot(double B_R)\n{\n return B_R;\n}\n\n__host__ __device__\ninline double get_thetadot(double R, double B_theta)\n{\n return B_theta / (R * R);\n}\n\n__host__ __device__\ninline double get_phidot(double R, double theta, double B_phi)\n{\n return B_phi / (R * R * sin(theta) * sin(theta));\n}\n\n__host__ __device__\ninline double get_B_R(double Rdot)\n{\n return Rdot;\n}\n\n__host__ __device__\ninline double get_B_theta(double R, double thetadot)\n{\n return R * R * thetadot;\n}\n\n__host__ __device__\ninline double get_B_phi(double R, double theta, double phidot)\n{\n return R * R * sin(theta) * sin(theta) * phidot;\n}\n\n__host__ __device__\ninline double get_Bdot_R(double R, double theta, double phi, \n double B_theta, double B_phi,\n double R_sun, double theta_sun, double phi_sun,\n double R_earth, double theta_earth, double phi_earth,\n double R_mars, double theta_mars, double phi_mars)\n{\n double numerator_sun = SUN_ETA *\n (-R + R_sun * (cos(theta) * cos(theta_sun) + sin(theta) * sin(theta_sun) * cos(phi - phi_sun)));\n double denominator_sun = R * R + R_sun * R_sun -\n 2.0 * R * R_sun * (cos(theta) * cos(theta_sun) +\n sin(theta) * sin(theta_sun) * cos(phi - phi_sun));\n denominator_sun = denominator_sun * sqrt(denominator_sun);\n\n double numerator_earth = EARTH_ETA *\n (-R + R_earth * (cos(theta) * cos(theta_earth) + sin(theta) * sin(theta_earth) * cos(phi - phi_earth)));\n double denominator_earth = R * R + R_earth * R_earth -\n 2.0 * R * R_earth * (cos(theta) * cos(theta_earth) +\n sin(theta) * sin(theta_earth) * cos(phi - phi_earth));\n denominator_earth = denominator_earth * sqrt(denominator_earth);\n\n double numerator_mars = MARS_ETA *\n (-R + R_mars * (cos(theta) * cos(theta_mars) + sin(theta) * sin(theta_mars) * cos(phi - phi_mars)));\n double denominator_mars = R * R + R_mars * R_mars -\n 2.0 * R * R_mars * (cos(theta) * cos(theta_mars) +\n sin(theta) * sin(theta_mars) * cos(phi - phi_mars));\n denominator_mars = denominator_mars * sqrt(denominator_mars);\n double Bdot_R1 = (B_theta * B_theta) / (R * R * R);\n double Bdot_R2 = (B_phi * B_phi) /(R * R * R * sin(theta) * sin(theta));\n double Bdot_R3 =\n (numerator_sun / denominator_sun) +\n (numerator_earth / denominator_earth) +\n (numerator_mars / denominator_mars);\n /*\n if (Bdot_R1 + Bdot_R2 + Bdot_R3 < -1e100)\n {\n printf(\"====== numerator_earth2=%.15le denominator_earth2=%.15le\\n\", numerator_earth2, denominator_earth2);\n printf(\"====== numerator_sun=%.15le numerator_earth=%.15le numerator_mars=%.15le\\n\", numerator_sun, numerator_earth, numerator_mars);\n printf(\"====== denominator_sun=%.15le denominator_earth=%.15le denominator_mars=%.15le\\n\", denominator_sun, denominator_earth, denominator_mars);\n printf(\"====== Bdot_R1=%.15lf Bdot_R2=%.15lf Bdot_R3=%.15lf\\n\", Bdot_R1, Bdot_R2, Bdot_R3);\n }\n */\n return Bdot_R1 + Bdot_R2 + Bdot_R3;\n}\n\n__host__ __device__\ninline double get_Bdot_theta(double R, double theta, double phi, \n double B_phi,\n double R_sun, double theta_sun, double phi_sun,\n double R_earth, double theta_earth, double phi_earth,\n double R_mars, double theta_mars, double phi_mars)\n{\n double numerator_sun = SUN_ETA * \n (R * R_sun * (-sin(theta) * cos(theta_sun) + cos(theta) * sin(theta_sun) * cos(phi - phi_sun)));\n double denominator_sun = R * R + R_sun * R_sun -\n 
2.0 * R * R_sun * (cos(theta) * cos(theta_sun) +\n sin(theta) * sin(theta_sun) * cos(phi - phi_sun));\n denominator_sun = denominator_sun * sqrt(denominator_sun);\n double numerator_earth = EARTH_ETA * \n (R * R_earth * (-sin(theta) * cos(theta_earth) + cos(theta) * sin(theta_earth) * cos(phi - phi_earth)));\n double denominator_earth = R * R + R_earth * R_earth -\n 2.0 * R * R_earth * (cos(theta) * cos(theta_earth) +\n sin(theta) * sin(theta_earth) * cos(phi - phi_earth));\n denominator_earth = denominator_earth * sqrt(denominator_earth);\n double numerator_mars = MARS_ETA * \n (R * R_mars * (-sin(theta) * cos(theta_mars) + cos(theta) * sin(theta_mars) * cos(phi - phi_mars)));\n double denominator_mars = R * R + R_mars * R_mars -\n 2.0 * R * R_mars * (cos(theta) * cos(theta_mars) +\n sin(theta) * sin(theta_mars) * cos(phi - phi_mars));\n denominator_mars = denominator_mars * sqrt(denominator_mars);\n\n double Bdot_theta1 = (B_phi * B_phi) /\n (R * R * sin(theta) * sin(theta) * tan(theta));\n double Bdot_theta2 =\n numerator_sun / denominator_sun + \n numerator_earth / denominator_earth + \n numerator_mars / denominator_mars;\n return Bdot_theta1 + Bdot_theta2;\n}\n\n__host__ __device__\ninline double get_Bdot_phi(double R, double theta, double phi, \n double R_sun, double theta_sun, double phi_sun,\n double R_earth, double theta_earth, double phi_earth,\n double R_mars, double theta_mars, double phi_mars)\n{\n double numerator_sun = SUN_ETA * \n (-R * R_sun * sin(theta) * sin(theta_sun) * sin(phi - phi_sun));\n double denominator_sun = R * R + R_sun * R_sun -\n 2.0 * R * R_sun * (cos(theta) * cos(theta_sun) +\n sin(theta) * sin(theta_sun) * cos(phi - phi_sun));\n denominator_sun = denominator_sun * sqrt(denominator_sun);\n double numerator_earth = EARTH_ETA * \n (-R * R_earth * sin(theta) * sin(theta_earth) * sin(phi - phi_earth));\n double denominator_earth = R * R + R_earth * R_earth -\n 2.0 * R * R_earth * (cos(theta) * cos(theta_earth) +\n sin(theta) * sin(theta_earth) * cos(phi - phi_earth));\n denominator_earth = denominator_earth * sqrt(denominator_earth);\n double numerator_mars = MARS_ETA * \n (-R * R_mars * sin(theta) * sin(theta_mars) * sin(phi - phi_mars));\n double denominator_mars = R * R + R_mars * R_mars -\n 2.0 * R * R_mars * (cos(theta) * cos(theta_mars) +\n sin(theta) * sin(theta_mars) * cos(phi - phi_mars));\n denominator_mars = denominator_mars * sqrt(denominator_mars);\n return\n numerator_sun / denominator_sun + \n numerator_earth / denominator_earth + \n numerator_mars / denominator_mars;\n}\n" }, { "alpha_fraction": 0.46678245067596436, "alphanum_fraction": 0.49413809180259705, "avg_line_length": 18.352941513061523, "blob_id": "2730e3ee00e85c6b87b25a0e651e6b9190d6e450", "content_id": "55d2c482b62190c06c1cab99e778315c986d0e42", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2303, "license_type": "permissive", "max_line_length": 87, "num_lines": 119, "path": "/code/tests/r3b_2d/test_analyticals.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"Pytest for R3B-2D equations, e.g. pdot_x and pdot_y. 
Note that x and y are so simple\nthat they need not be tested.\n\"\"\"\nimport json\nimport os\n\nimport pytest\n\nfrom orbsim.r3b_2d.analyticals import get_pdot_x, get_pdot_y\n\n\ndef math_data():\n \"\"\"import data from json file created by mathematica script and import as\n 'ground truth'\n \"\"\"\n\n math_json_filename = os.path.basename(__file__).split(\".\")[0] + \".json\"\n\n with open(\n os.path.dirname(os.path.realpath(__file__)) + \"/\" + math_json_filename\n ) as file:\n data = json.load(file)\n return data\n\n\ndef test1_zeros():\n \"\"\"Test 1\"\"\"\n data = math_data()\n x = 0\n y = 0\n p_x = 0\n p_y = 0\n assert [get_pdot_x(x, y, p_y), get_pdot_y(x, y, p_x)] == pytest.approx(\n data[\"test1\"]\n )\n\n\ndef test2_x_positive():\n \"\"\"Test 2\"\"\"\n data = math_data()\n x = 0.5\n y = 0\n p_x = 0\n p_y = 0\n assert [get_pdot_x(x, y, p_y), get_pdot_y(x, y, p_x)] == pytest.approx(\n data[\"test2\"]\n )\n\n\ndef test3_y_negative():\n \"\"\"Test 3\"\"\"\n data = math_data()\n x = 0\n y = -0.4\n p_x = 0\n p_y = 0\n assert [get_pdot_x(x, y, p_y), get_pdot_y(x, y, p_x)] == pytest.approx(\n data[\"test3\"]\n )\n\n\ndef test4_px_positive():\n \"\"\"Test 4\"\"\"\n data = math_data()\n x = 0\n y = 0\n p_x = 2\n p_y = 0\n assert [get_pdot_x(x, y, p_y), get_pdot_y(x, y, p_x)] == pytest.approx(\n data[\"test4\"]\n )\n\n\ndef test5_py_negative():\n \"\"\"Test 5\"\"\"\n data = math_data()\n x = 0\n y = 0\n p_x = 0\n p_y = -5\n assert [get_pdot_x(x, y, p_y), get_pdot_y(x, y, p_x)] == pytest.approx(\n data[\"test5\"]\n )\n\n\ndef test6_all_positive():\n \"\"\"Test 6\"\"\"\n data = math_data()\n x = 1\n y = 2\n p_x = 3\n p_y = 4\n assert [get_pdot_x(x, y, p_y), get_pdot_y(x, y, p_x)] == pytest.approx(\n data[\"test6\"]\n )\n\n\ndef test7_all_negative():\n \"\"\"Test 7\"\"\"\n data = math_data()\n x = -4\n y = -3\n p_x = -2\n p_y = -1\n assert [get_pdot_x(x, y, p_y), get_pdot_y(x, y, p_x)] == pytest.approx(\n data[\"test7\"]\n )\n\n\ndef test8_all_mixed():\n \"\"\"Test 8\"\"\"\n data = math_data()\n x = -1\n y = 1\n p_x = -3\n p_y = 4\n assert [get_pdot_x(x, y, p_y), get_pdot_y(x, y, p_x)] == pytest.approx(\n data[\"test8\"]\n )\n" }, { "alpha_fraction": 0.5807791948318481, "alphanum_fraction": 0.5994805097579956, "avg_line_length": 29.0625, "blob_id": "358d76d421073461d0cfb9c9b96ab63e96bdea19", "content_id": "9208b28444daa485b3c223a9f0b0ed98b60011fa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1925, "license_type": "permissive", "max_line_length": 85, "num_lines": 64, "path": "/code/orbsim/r3b_2d/simulators.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "import time\n\nimport numpy as np\nfrom numba import njit\n\nfrom . 
import (\n    UNIT_TIME,\n    UNIT_VELOCITY,\n    LEO_RADIUS_NONDIM,\n    EARTH_POSITION_X,\n    LEO_VELOCITY_NONDIM,\n)\nfrom .integrators import symplectic\n\n\n@njit\ndef run_sim(psi, duration=3, max_iter=1e7):\n    \"\"\"\n    return: [Dv, success, List[x, y, px, py, h]]\n    Launch (not really a launch, since we start from LEO) a single rocket with a\n    given set of hyperparameters and return the resulting path.\n    \"\"\"\n    pos_ang, burn_ang, burnDv = psi  # extract parameters from decision vector\n    burnDv /= UNIT_VELOCITY\n    duration /= UNIT_TIME\n\n    # define init params\n    # position (where on earth do we start our burn)\n    x0 = np.cos(pos_ang) * LEO_RADIUS_NONDIM\n    y0 = np.sin(pos_ang) * LEO_RADIUS_NONDIM\n    x0 += EARTH_POSITION_X\n\n    # how fast are we going when we start?\n    vhat_x = -np.sin(pos_ang)\n    vhat_y = np.cos(pos_ang)\n    v_x = (LEO_VELOCITY_NONDIM) * vhat_x\n    v_y = (LEO_VELOCITY_NONDIM) * vhat_y\n\n    # burn vector: At what angle do we launch outward, and how hard do we push?\n    burnDv_x = np.cos(burn_ang) * vhat_x - np.sin(burn_ang) * vhat_y\n    burnDv_y = np.sin(burn_ang) * vhat_x + np.cos(burn_ang) * vhat_y\n\n    # resultant momentum vector\n    p0_x = v_x + burnDv * burnDv_x - y0\n    p0_y = v_y + burnDv * burnDv_y + x0\n\n    # SIMULATE\n    # print(f\"running symplectic with [x0, y0, p0_x, p0_y]{[x0, y0, p0_x, p0_y]}\")\n    # starttime = time.time()\n    score = [0.0]\n    success = [0]\n    path = symplectic(\n        x0, y0, p0_x, p0_y, score, success, duration=duration, max_iter=int(max_iter)\n    )\n    return score[0], success[0], path\n    # if success[0] == 1:\n    #     # print(\"SUCCESS\")\n    #     final_score = score[0] + burnDv\n    #     # print(\"score = \", final_score)\n    # else:\n    #     final_score = (1 + score[0]) * 10\n    #     # print(\"score = \", final_score)\n\n    # return score[0], success[0], path\n\n" }, { "alpha_fraction": 0.5557112097740173, "alphanum_fraction": 0.5731567740440369, "avg_line_length": 30.408000946044922, "blob_id": "d36aedbe54a951acf531edc6141b45608121d235", "content_id": "4c2c9bdcf3ff32e777dcf9c9bebb2d70e6cd1d2a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7853, "license_type": "permissive", "max_line_length": 143, "num_lines": 250, "path": "/code/orbsim/r3b_2d/plotting.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "from math import cos, pi, sin, sqrt\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom orbsim.r3b_2d import *\n\n\ndef orbitplot2d(completed_path, psi=None, filepath=\".\", title=None, multi_mode=False):\n\n    \"\"\"\n    input: output of run_sim, its launch parameters, and an optional title if the figure is to be saved\n\n    Plots a figure of the inputted orbit, with the end point marked with a red cross, and the Earth and Moon/Mars marked as well.\n    \"\"\"\n    score, _, path = completed_path  # [Dv,success?,[x,y,px,py,h]]\n\n    xs, ys, pxs, pys, hs, ts = np.array(path).T\n\n    Xs = xs * np.cos(ts) - ys * np.sin(ts)\n    Ys = xs * np.sin(ts) + ys * np.cos(ts)\n\n    Xs_earth = EARTH_POSITION_X * np.cos(ts)\n    Ys_earth = EARTH_POSITION_X * np.sin(ts)\n\n    Xs_moon = LUNAR_POSITION_X * np.cos(ts)\n    Ys_moon = LUNAR_POSITION_X * np.sin(ts)\n\n    idxs = get_idxs(ts)\n    N_PTS = len(idxs)\n    increment = int(N_PTS / 100)\n\n    if multi_mode:\n        ax = plt.gca()\n    else:\n        fig, ax = fig_setup(score, psi)\n    cm = plt.get_cmap(\"bone\")\n    ax.set_prop_cycle(\n        \"color\",\n        [\n            cm(max(0, 0.9 - (1. 
* i / (N_PTS / increment))))\n for i in range(int(N_PTS / increment))\n ],\n )\n\n g_Xs, g_Ys = np.array([[Xs[idx], Ys[idx]] for idx in idxs]).T\n for i in range(N_PTS)[::increment]:\n ax.plot(g_Xs[i : i + increment], g_Ys[i : i + increment], linewidth=1)\n\n ax.plot(Xs_earth, Ys_earth, color=\"grey\", linewidth=0.5, alpha=0.8)\n ax.plot(Xs_moon, Ys_moon, color=\"grey\", linewidth=0.5)\n\n circle_x, circle_y = orbital_circle(\"moon\")\n ax.plot(circle_x, circle_y, color=\"grey\", linewidth=0.3, alpha=0.6)\n\n earth = plt.Circle(\n (Xs_earth[0], Ys_earth[0]), EARTH_RADIUS / UNIT_LENGTH, color=\"blue\", alpha=0.8\n )\n moon = plt.Circle(\n (Xs_moon[-1], Ys_moon[-1]), LUNAR_RADIUS / UNIT_LENGTH, color=\"grey\", alpha=0.8\n )\n ax.add_artist(earth)\n ax.add_artist(moon)\n\n # ax.scatter(Xs[0], Ys[0], color=\"green\")\n ax.scatter(Xs[-1], Ys[-1], color=\"red\", marker=\"x\", linewidth=0.6)\n\n if not multi_mode:\n if title is None:\n plt.show()\n else:\n filename = f\"{filepath}/path_{title}.pdf\"\n plt.savefig(filename)\n\n\ndef orbitplot_non_inertial(\n completed_path, psi=None, filepath=\".\", title=None, multi_mode=False\n):\n \"\"\"\n input: output of launch_sim, its launch parameters, and an optional title if the file is to be saved\n \n Plots a figure of the inputted orbit in the non-inertial reference frame, with end point marked in red, earth and moon/mars marked as well.\n \"\"\"\n score, _, path = completed_path # [Dv,[x,y,px,py,h]]\n\n xs, ys, pxs, pys, hs, _ = np.array(path).T\n\n idxs = get_idxs(hs)\n N_PTS = len(idxs)\n increment = int(N_PTS / 100)\n if multi_mode:\n ax = plt.gca()\n else:\n fig, ax = fig_setup(score, psi)\n cm = plt.get_cmap(\"bone\")\n ax.set_prop_cycle(\n \"color\",\n [\n cm(max(0, 0.9 - (1. * i / (N_PTS / increment))))\n for i in range(int(N_PTS / increment))\n ],\n )\n\n g_xs, g_ys = np.array([[xs[idx], ys[idx]] for idx in idxs]).T\n for i in range(N_PTS)[::increment]:\n ax.plot(g_xs[i : i + increment], g_ys[i : i + increment], linewidth=1)\n\n earth = plt.Circle((EARTH_POSITION_X, 0), EARTH_RADIUS / UNIT_LENGTH, color=\"blue\")\n moon = plt.Circle((LUNAR_POSITION_X, 0), LUNAR_RADIUS / UNIT_LENGTH, color=\"grey\")\n ax.add_artist(earth)\n ax.add_artist(moon)\n\n ax.scatter([L1_POSITION_X], [0], marker=\"x\", color=\"pink\", linewidth=0.4)\n ax.scatter(xs[-1], ys[-1], color=\"red\", marker=\"x\", linewidth=0.6)\n\n circle_x, circle_y = orbital_circle(\"moon\")\n ax.plot(circle_x, circle_y, color=\"grey\", linewidth=0.3, alpha=0.3)\n\n if not multi_mode:\n if title is None:\n plt.show()\n else:\n filename = f\"{filepath}/path_{title}_non-inertial.pdf\"\n plt.savefig(filename)\n\n\ndef get_idxs(hs):\n idxs = []\n tally = 0\n for i in range(\n len(hs)\n ): # each time step h, check whether the little tally has reached our threshold.\n h = hs[i] # if it has, take that index as a time step\n tally += h\n if tally >= 1.5e-4:\n idxs.append(i)\n tally = 0\n return idxs\n\n\ndef fig_setup(score, psi):\n fig = plt.figure()\n ax = fig.gca()\n if score < 100:\n scorestr = \"DeltaV\"\n else:\n scorestr = \"smallest distance\"\n if psi is not None:\n fig.suptitle(f\"{scorestr} = {score}, psi = {[round(p,3) for p in psi]}\")\n else:\n fig.suptitle(f\"{scorestr} = {score}\")\n\n ax.set_aspect(\"equal\")\n return fig, ax\n\n\ndef orbitplot3d(completed_path, psi, filepath=\".\", title=None):\n Dv, path = completed_path\n\n xs = [e[0] for e in path]\n ys = [e[1] for e in path]\n hs = [e[4] for e in path]\n fig = plt.figure()\n ax = fig.gca(projection=\"3d\")\n 
fig.suptitle(f\"DeltaV = {Dv}, hyperparameters = {[round(p,3) for p in psi]}\")\n\n ax.plot(xs, ys, range(len(hs)), color=\"black\", linewidth=2)\n\n ax.scatter([EARTH_POSITION_X], [0], color=\"blue\")\n ax.scatter([LUNAR_POSITION_X], [0], color=\"grey\")\n ax.scatter([L1_POSITION_X], [0], color=\"pink\")\n\n circle_x = [cos(x / 100.0 * 2 * pi) for x in range(100)]\n circle_y = [sin(x / 100.0 * 2 * pi) for x in range(100)]\n\n ax.plot(circle_x, circle_y, color=\"grey\")\n\n # ax.scatter(xs[-0], ys[0], color=\"green\")\n ax.scatter(xs[-1], ys[-1], len(hs), color=\"red\")\n\n plt.show()\n\n\ndef leo_plot(completed_path, psi=None, filepath=\".\", title=None, fig=None):\n \"\"\"\n input: output of launch_sim, its launch parameters, and an optional title if the file is to be saved\n \n Plots a figure of the inputted orbit, with start point marked in green, and point marked in red, earth and moon/mars marked as well.\n \"\"\"\n score, path = completed_path # [Dv,[x,y,px,py,h]]\n xs, ys, pxs, pys, hs, ts = np.array(path).T\n # ts = np.linspace(0, sum(hs), len(path))\n\n increment = 500\n fig, ax = fig_setup(score, psi)\n\n ax.plot(xs, ys, color=\"black\", linewidth=\"2\")\n earth = plt.Circle((EARTH_POSITION_X, 0), EARTH_RADIUS / UNIT_LENGTH, color=\"blue\")\n ax.add_artist(earth)\n\n if title is None:\n plt.show()\n else:\n filename = f\"{filepath}/path_{title}.pdf\"\n plt.savefig(filename)\n\n\ndef multi_plot(completed_paths, psis, plot_type, filepath=\".\", title=None):\n N = len(completed_paths)\n if len(psis) != N:\n raise Exception(\"must have the same number of psis as paths\")\n fig = plt.figure()\n ax = plt.gca()\n ax.set_aspect(\"equal\")\n cmap_cycle = [\"bone\", \"autumn\", \"winter\", \"summer\", \"spring\"]\n\n for i, [cpath, psi] in enumerate(zip(completed_paths, psis)):\n _, path = cpath\n ts = np.array(path).T[5]\n idxs = get_idxs(ts)\n increment = len(idxs) / 100\n # print(len(idxs),increment)\n\n cm = plt.get_cmap(cmap_cycle[i % len(cmap_cycle)])\n ax.set_prop_cycle(\n \"color\", [cm(max(0, 0.9 - (1. 
* i / 100))) for i in range(100)]\n )\n plot_type(cpath, psi, multi_mode=True)\n\n if title is None:\n plt.show()\n else:\n filename = f\"{filepath}/path_{title}.pdf\"\n plt.savefig(filename)\n plt.close()\n\n\ndef orbital_circle(celestial):\n \"\"\"\n input: celestial enum\n returns: x/y points for a plottable circle of the celestial's orbit\n \"\"\"\n if celestial == \"moon\":\n circle_x = [LUNAR_POSITION_X * cos(x / 100.0 * 2 * pi) for x in range(0, 101)]\n circle_y = [LUNAR_POSITION_X * sin(x / 100.0 * 2 * pi) for x in range(0, 101)]\n elif celestial == \"earth\":\n return None\n\n return [circle_x, circle_y]\n\n" }, { "alpha_fraction": 0.4669375717639923, "alphanum_fraction": 0.4791281819343567, "avg_line_length": 33.70512771606445, "blob_id": "0c4f3623234a6c933753a75daef7bc9b56b1a79a", "content_id": "379342f63470cc81d77493f4ec8e218db1f7e2a1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2707, "license_type": "permissive", "max_line_length": 85, "num_lines": 78, "path": "/code/marscudasim/coordinate_system.h", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include <math.h>\n\n__host__ __device__\ninline void rotate(double vx, double vy, double vz,\n double kx, double ky, double kz,\n double* x, double* y, double* z,\n double theta)\n{\n double cos_theta = cos(theta);\n double sin_theta = sin(theta);\n double cx = ky * vz - kz * vy;\n double cy = kz * vx - kx * vz;\n double cz = kx * vy - ky * vx;\n double d = vx * kx + vy * ky + vz * kz;\n *x = vx * cos_theta + cx * sin_theta + (d * kx) * (1 - cos_theta);\n *y = vy * cos_theta + cy * sin_theta + (d * ky) * (1 - cos_theta);\n *z = vz * cos_theta + cz * sin_theta + (d * kz) * (1 - cos_theta);\n}\n\n__host__ __device__\ninline void velocity_cartesian2spherical(double x, double y, double z,\n double xdot, double ydot, double zdot,\n double* vR, double* vtheta, double* vphi)\n{\n *vR = (x * xdot + y * ydot + z * zdot) / (sqrt(x * x + y * y + z * z));\n *vtheta = ((x * xdot + y * ydot) * z - (x * x + y * y) * zdot) / \n ((x * x + y * y + z * z) * sqrt(x * x + y * y));\n *vphi = (x * ydot - xdot * y) / (x * x + y * y);\n}\n\n__host__ __device__\ninline void spherical2cartesian(double R, double theta, double phi,\n double* x, double* y, double* z)\n{\n /*\n *x = sin(theta) * cos(phi) * R + cos(theta) * cos(phi) * theta - sin(phi) * phi;\n *y = sin(theta) * sin(phi) * R + cos(theta) * sin(phi) * theta + cos(phi) * phi;\n *z = cos(theta) * R - sin(theta) * theta;\n */\n *x = sin(theta) * cos(phi) * R;\n *y = sin(theta) * sin(phi) * R;\n *z = cos(theta) * R;\n}\n\n__host__ __device__\ninline void cartesian2spherical(double x, double y, double z,\n double* R_, double* theta_, double* phi_)\n{\n /*\n double rho = sqrt(x*x + y*y + z*z);\n double theta = acos(z / rho);\n double phi = atan(y / x);\n *R_ = sin(theta) * cos(phi) * x + sin(theta) * sin(phi) * y + cos(theta) * z;\n *theta_ = cos(theta) * cos(phi) * x + cos(theta) * sin(phi) * y - sin(theta) * z;\n *phi_ = -sin(phi) * x + cos(phi) * y;\n */\n *R_ = sqrt(x*x + y*y + z*z);\n *theta_ = acos(z / *R_);\n *phi_ = atan(y / x);\n}\n\n__host__ __device__\ninline double distance(double R0, double theta0, double phi0,\n double R1, double theta1, double phi1)\n{\n return sqrt(R0 * R0 + R1 * R1 - 2 * R0 * R1 *\n (sin(theta0) * sin(theta1) * cos(phi0 - phi1) + \n cos(theta0) * cos(theta1)));\n}\n\n__host__ __device__\ninline double lerp(double v0, double v1, double t)\n{\n return (1 - t) * v0 + t * 
v1;\n //return v0 + t * (v1 - v0);\n}\n" }, { "alpha_fraction": 0.42012113332748413, "alphanum_fraction": 0.4937871992588043, "avg_line_length": 34.33948516845703, "blob_id": "6ab39e275556ed8b228a3b87967046c7fe5093bc", "content_id": "152fc1c916ad2a06ba46d0fc147afc8a629ee4eb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19154, "license_type": "permissive", "max_line_length": 100, "num_lines": 542, "path": "/code/main_bsc.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\"\"\"\nReduced 3-body Problem testing script\n====================================\nTesting the reduced 3-body problem solvers with different numerical algorithms.\nTODO: Add more description + how to use\n\"\"\"\nimport multiprocessing\nimport pathlib\nimport sys\nimport time\nfrom math import cos, pi, sin\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom orbsim import DAY, EARTH_RADIUS, LUNAR_RADIUS\nfrom orbsim.r3b_2d import (\n EARTH_POSITION_X,\n L1_POSITION_X,\n LEO_RADIUS,\n LEO_VELOCITY,\n LLO_RADIUS,\n LLO_VELOCITY,\n LUNAR_POSITION_X,\n ORBITAL_TOLERANCE,\n UNIT_LENGTH,\n UNIT_TIME,\n UNIT_VELOCITY,\n k,\n)\nfrom r3b_bsc import reduced3body as r3b\n\n\ndef run_test():\n\n MODE = sys.argv[1]\n\n FORMAT = \"png\"\n # FORMAT = \"pdf\"\n\n mode_dict = {\n # Keys: Possible input arguments (argv)\n # Values: Output folder name of associated log/figs of run\n # Precalculated initial conditions for specific orbit types\n \"leo\": \"demo_leo_closed\",\n \"llo\": \"demo_llo_closed\",\n \"h\": \"demo_hohmann\",\n \"h3\": \"demo_hohmann_3_days\",\n \"h1\": \"demo_hohmann_1_day\",\n \"hr\": \"demo_hohmann_reverse\",\n \"ls\": \"demo_leto_short\",\n \"ll\": \"demo_leto_long\",\n # \"l1\": \"demo_L1\", # TODO: Takes a long time, probably doesn't work, look at later\n # Search for trajectories\n \"sh\": \"search_hohmann\",\n \"sl\": \"search_leto\", # FIXME: Figure out why no figs when search_hohmann have\n # \"slp8\": \"search_low_energy_parts_8\", # TODO: Doesn't work - what does it do?\n # \"sr\": \"search_refine\" # TODO: Doesn't work - what does it do?\n }\n MODE_NAME = mode_dict[MODE]\n\n OUTPUT_DIR = \"tests/r3b_2d/simulation_output/\" + MODE_NAME + \"/\"\n\n pathlib.Path(OUTPUT_DIR).mkdir(parents=True, exist_ok=True)\n\n # All prints are redirected to log file\n old_stdout = sys.stdout\n log_file = open(OUTPUT_DIR + \"log_\" + MODE_NAME + \".log\", \"w\")\n sys.stdout = log_file\n\n # Threads will typically be 8 on quadcore machines\n threads = (\n multiprocessing.cpu_count()\n ) # If raises NotImplementedError, do this instead https://stackoverflow.com/a/14840102/2948823\n\n runtime = time.time()\n\n n = 1000000 # TODO: What the duck is n doing here\n\n # Set coordinates\n if MODE == \"leo\": # Low Earth Orbit, one closed orbit\n duration = (2.0 * pi * LEO_RADIUS / LEO_VELOCITY) / (UNIT_TIME * DAY)\n r = LEO_RADIUS / UNIT_LENGTH\n v = 0.99732 * LEO_VELOCITY / UNIT_VELOCITY\n theta = 0\n x = r * cos(theta)\n y = r * sin(theta)\n v_x = -v * y / r\n v_y = v * x / r\n pos = 0\n ang = 0\n burn = 0\n x0 = EARTH_POSITION_X + x\n y0 = y\n p0_x = v_x - y0\n p0_y = v_y + x0\n elif MODE == \"llo\": # Low Lunar Orbit, one closed orbit\n duration = (2.0 * pi * LLO_RADIUS / LLO_VELOCITY) / (UNIT_TIME * DAY)\n r = LLO_RADIUS / UNIT_LENGTH\n v = 0.99732 * LLO_VELOCITY / UNIT_VELOCITY\n theta = 0\n x = r * cos(theta)\n y = r * sin(theta)\n v_x = -v * y / r\n v_y = v * x / r\n pos = 0\n ang = 0\n burn = 0\n x0 = LUNAR_POSITION_X + 
x\n y0 = y\n p0_x = v_x - y0\n p0_y = v_y + x0\n elif MODE == \"h\": # Hohmann transfer orbit\n # MODE = 'search_refine'\n # --------------------------------------------------------------------------\n duration = 5 / UNIT_TIME\n pos = -2.086814820119193\n ang = -0.000122173047640\n burn = 3.111181716545691 / UNIT_VELOCITY\n x0 = -0.020532317163607\n y0 = -0.014769797663479\n p0_x = 9.302400979050308\n p0_y = -5.289712560652044\n # --------------------------------------------------------------------------\n # dV(earth-escape) = 3.111182 km/s\n # dV(moon-capture) = 0.800682 km/s\n # dV(total) = 3.911863 km/s\n # Flight-time = 4.300078 days\n # --------------------------------------------------------------------------\n elif MODE == \"hr\": # Reverse Hohmann\n # --------------------------------------------------------------------------\n duration = 4 / UNIT_TIME\n pos = -2.282942228154665\n ang = 0.000000000000000\n burn = -3.149483130653266 / UNIT_VELOCITY\n x0 = -0.023249912090507\n y0 = -0.012853859046429\n p0_x = -8.098481905534163\n p0_y = 6.978997254692934\n # --------------------------------------------------------------------------\n # dV(earth-escape) = 3.149483 km/s\n # dV(moon-capture) = 0.968488 km/s\n # dV(total) = 4.117971 km/s\n # Flight-time = 3.875497 days\n # --------------------------------------------------------------------------\n elif MODE == \"ll\": # LETO long\n # --------------------------------------------------------------------------\n duration = 195 / UNIT_TIME\n pos = 3.794182930145708\n ang = 0.023901745288554\n burn = 3.090702702702703 / UNIT_VELOCITY\n x0 = -0.025645129237870\n y0 = -0.010311570301966\n p0_x = 6.539303578815582\n p0_y = -8.449205705334165\n # --------------------------------------------------------------------------\n # dV(earth-escape) = 3.090703 km/s\n # dV(moon-capture) = 0.704114 km/s\n # dV(total) = 3.794816 km/s\n # Flight-time = 194.275480 days\n # --------------------------------------------------------------------------\n # --------------------------------------------------------------------------\n # MODE = 'search_refine'\n # duration = 195/unit_time\n # pos = 3.794182930145708\n # ang = 0.023901745288554\n # burn = 3.090702702702703/unit_velocity\n # x0 = -0.025645129237870\n # y0 = -0.010311570301966\n # p0_x = 6.539303578815583\n # p0_y = -8.449205705334164\n # --------------------------------------------------------------------------\n # dV(earth-escape) = 3.090703 km/s\n # dV(moon-capture) = 0.704114 km/s\n # dV(total) = 3.794817 km/s\n # Flight-time = 194.275480 days\n # --------------------------------------------------------------------------\n elif MODE == \"ls\": # LETO short\n # MODE = 'search_refine'\n # --------------------------------------------------------------------------\n duration = 41 / UNIT_TIME\n pos = -0.138042744751570\n ang = -0.144259374836607\n burn = 3.127288444444444 / UNIT_VELOCITY\n x0 = 0.004665728429046\n y0 = -0.002336647636098\n p0_x = 1.904735175752430\n p0_y = 10.504985512873279\n # --------------------------------------------------------------------------\n # dV(earth-escape) = 3.127288 km/s\n # dV(moon-capture) = 0.768534 km/s\n # dV(total) = 3.895822 km/s\n # Flight-time = 40.617871 days\n # --------------------------------------------------------------------------\n elif MODE == \"h3\": # 3-day Hohmann\n # MODE = 'search_refine'\n # --------------------------------------------------------------------------\n duration = 3 / UNIT_TIME\n pos = -2.272183066647597\n ang = -0.075821466029764\n 
burn = 3.135519748743719 / UNIT_VELOCITY\n x0 = -0.023110975767437\n y0 = -0.012972499765730\n p0_x = 8.032228991913522\n p0_y = -7.100537706154897\n # --------------------------------------------------------------------------\n # dV(earth-escape) = 3.135520 km/s\n # dV(moon-capture) = 0.879826 km/s\n # dV(total) = 4.015346 km/s\n # Flight-time = 2.999939 days\n # --------------------------------------------------------------------------\n elif MODE == \"h1\": # 1-day Hohmann\n # MODE = 'search_refine'\n duration = 1 / UNIT_TIME\n pos = -2.277654673852600\n ang = 0.047996554429844\n burn = 3.810000000000000 / UNIT_VELOCITY\n x0 = -0.023181791813268\n y0 = -0.012912351430812\n p0_x = 8.764829132987316\n p0_y = -7.263069305305378\n # --------------------------------------------------------------------------\n # dV(earth-escape) = 3.810000 km/s\n # dV(moon-capture) = 3.319455 km/s\n # dV(total) = 7.129455 km/s\n # Flight-time = 0.997234 days\n # --------------------------------------------------------------------------\n elif MODE == \"l1\": # Earth to L1 point\n MODE = \"search_refine\"\n # --------------------------------------------------------------------------\n duration = 191 / UNIT_TIME\n pos = 2.843432239707429\n ang = 0.000000000000000\n burn = 3.091851851851852 / UNIT_VELOCITY\n x0 = -0.028385246222264\n y0 = 0.004988337832881\n p0_x = -3.136296304910217\n p0_y = -10.217405925499762\n # --------------------------------------------------------------------------\n # dV(earth-escape) = 3.091852 km/s\n # dV(at L1) = 0.676226 km/s\n # dV(total) = 3.768078 km/s\n # Flight-time = 190.001881 days\n # --------------------------------------------------------------------------\n\n #################### FUNCTION CALLS ####################\n\n if MODE == \"sh\": # Search for Hohmann:\n ts, xs, ys, p_xs, p_ys, step_errors, h_list = r3b.hohmann(threads, n)\n elif MODE == \"sl\": # Search for LETO\n ts, xs, ys, p_xs, p_ys, step_errors, h_list = r3b.low_energy(threads, n)\n elif MODE == \"sl_parts8\":\n ts, xs, ys, p_xs, p_ys, step_errors, h_list = r3b.low_energy_parts8(threads, n)\n elif MODE == \"search_refine\":\n ts, xs, ys, p_xs, p_ys, step_errors, h_list = r3b.refine(\n threads, n, duration, pos, ang, burn, x0, y0, p0_x, p0_y\n )\n else:\n ts, xs, ys, p_xs, p_ys, step_errors, h_list = r3b.trajectory(\n n, duration, pos, ang, burn, x0, y0, p0_x, p0_y\n )\n H_list = (\n p_xs ** 2 / 2\n + p_ys ** 2 / 2\n + ys * p_xs\n - xs * p_ys\n - (1 - k) / np.sqrt(np.power(k + xs, 2) + np.power(ys, 2))\n - k / np.sqrt(np.power(1 - k - xs, 2) + np.power(ys, 2))\n )\n print(\"# Final position: %f %f\" % (xs[n - 1], ys[n - 1]))\n print(\"# Final impulse: %f %f\" % (p_xs[n - 1], p_ys[n - 1]))\n print(\"# Final H: %f\" % (H_list[n - 1]))\n runtime = time.time() - runtime\n print(\"# Total runtime = %3.2fs\" % (runtime))\n print(\n \"# --------------------------------------------------------------------------\"\n )\n print(\"# --- Done with FUNCTION CALLS\")\n # exit()\n\n #################### PLOTS: POSITION ####################\n\n n2 = int(n / 2)\n\n xs1 = xs[:n2]\n ys1 = ys[:n2]\n xs2 = xs[n2:]\n ys2 = ys[n2:]\n\n X_list1 = xs[:n2] * np.cos(ts[:n2]) - ys[:n2] * np.sin(ts[:n2])\n Y_list1 = xs[:n2] * np.sin(ts[:n2]) + ys[:n2] * np.cos(ts[:n2])\n X_list2 = xs[n2:] * np.cos(ts[n2:]) - ys[n2:] * np.sin(ts[n2:])\n Y_list2 = xs[n2:] * np.sin(ts[n2:]) + ys[n2:] * np.cos(ts[n2:])\n\n X_list_earth = EARTH_POSITION_X * np.cos(ts)\n Y_list_earth = -EARTH_POSITION_X * np.sin(ts)\n\n X_list_moon = LUNAR_POSITION_X * 
np.cos(ts)\n Y_list_moon = LUNAR_POSITION_X * np.sin(ts)\n\n # Rel. step_error\n plt.figure()\n plt.plot(ts * UNIT_TIME, step_errors)\n plt.xlabel(\"time (days)\")\n plt.ylabel(\"step error\")\n plt.yscale(\"log\")\n plt.savefig(\n OUTPUT_DIR + \"{}-step_error_vs_time.{}\".format(MODE_NAME, FORMAT),\n bbox_inches=\"tight\",\n )\n\n # Step sizes\n plt.figure()\n plt.plot(ts * UNIT_TIME, h_list)\n plt.xlabel(\"time (days)\")\n plt.ylabel(\"step size\")\n plt.yscale(\"log\")\n plt.savefig(\n OUTPUT_DIR + \"{}-step_size_vs_time.{}\".format(MODE_NAME, FORMAT),\n bbox_inches=\"tight\",\n )\n\n # Old and weird \"Total energy error\"\n # # Total energy error\n # H_avg = np.sum(H_list) / n\n # H_relative_errors = (H_list - H_avg) / H_avg\n # plt.figure()\n # plt.plot(ts * UNIT_TIME, H_relative_errors)\n # plt.xlabel(\"time (days)\")\n # plt.ylabel(\"Hamiltonian relative error (arbitrary units)\")\n # plt.savefig(\n # OUTPUT_DIR + \"{}-energy_error_vs_time.{}\".format(MODE_NAME, FORMAT),\n # bbox_inches=\"tight\",\n # )\n\n # Total energy\n plt.figure()\n plt.plot(ts * UNIT_TIME, H_list)\n plt.xlabel(\"time (days)\")\n plt.ylabel(\"Hamiltonian (arbitrary units)\")\n plt.savefig(\n OUTPUT_DIR + \"{}-energy_vs_time.{}\".format(MODE_NAME, FORMAT),\n bbox_inches=\"tight\",\n )\n\n # Zoom earth\n xlim = 0.02\n ylim = 0.02\n xmin = EARTH_POSITION_X - xlim\n xmax = EARTH_POSITION_X + xlim\n ymin = -ylim\n ymax = ylim\n plt.figure()\n earth = plt.Circle((EARTH_POSITION_X, 0), EARTH_RADIUS / UNIT_LENGTH, color=\"blue\")\n earthorbit1 = plt.Circle(\n (EARTH_POSITION_X, 0),\n (LEO_RADIUS - ORBITAL_TOLERANCE) / UNIT_LENGTH,\n color=\"g\",\n fill=False,\n )\n earthorbit2 = plt.Circle(\n (EARTH_POSITION_X, 0),\n (LEO_RADIUS + ORBITAL_TOLERANCE) / UNIT_LENGTH,\n color=\"g\",\n fill=False,\n )\n plt.gcf().gca().add_artist(earth)\n plt.gcf().gca().add_artist(earthorbit1)\n plt.gcf().gca().add_artist(earthorbit2)\n plt.plot(xs1, ys1, \"r-\")\n plt.plot(xs2, ys2, \"k-\")\n plt.xlim(xmin, xmax)\n plt.ylim(ymin, ymax)\n plt.gca().set_aspect(\"equal\", adjustable=\"box\")\n plt.xlabel(\"x-position (arbitrary units)\")\n plt.ylabel(\"y-position (arbitrary units)\")\n plt.savefig(\n OUTPUT_DIR + \"{}-earth_exit_y(x).{}\".format(MODE_NAME, FORMAT),\n bbox_inches=\"tight\",\n )\n\n # Zoom moon\n xlim = 0.0055\n ylim = 0.0055\n xmin = LUNAR_POSITION_X - xlim\n xmax = LUNAR_POSITION_X + xlim\n ymin = -ylim\n ymax = ylim\n plt.figure()\n moon = plt.Circle((LUNAR_POSITION_X, 0), LUNAR_RADIUS / UNIT_LENGTH, color=\"grey\")\n moonorbit1 = plt.Circle(\n (LUNAR_POSITION_X, 0),\n (LLO_RADIUS - ORBITAL_TOLERANCE) / UNIT_LENGTH,\n color=\"g\",\n fill=False,\n )\n moonorbit2 = plt.Circle(\n (LUNAR_POSITION_X, 0),\n (LLO_RADIUS + ORBITAL_TOLERANCE) / UNIT_LENGTH,\n color=\"g\",\n fill=False,\n )\n plt.gcf().gca().add_artist(moon)\n plt.gcf().gca().add_artist(moonorbit1)\n plt.gcf().gca().add_artist(moonorbit2)\n plt.plot(xs1, ys1, \"r-\")\n plt.plot(xs2, ys2, \"k-\")\n plt.xlim(xmin, xmax)\n plt.ylim(ymin, ymax)\n plt.gca().set_aspect(\"equal\", adjustable=\"box\")\n plt.xlabel(\"x-position (arbitrary units)\")\n plt.ylabel(\"y-position (arbitrary units)\")\n plt.savefig(\n OUTPUT_DIR + \"{}-moon_entry_y(x).{}\".format(MODE_NAME, FORMAT),\n bbox_inches=\"tight\",\n )\n\n # View center of mass\n xlim = 1.3\n ylim = 1.3\n xmin = -xlim\n xmax = xlim\n ymin = -ylim\n ymax = ylim\n\n # Position plot (X,Y)\n plt.figure()\n plt.plot(X_list1, Y_list1, \"r\")\n plt.plot(X_list2, Y_list2, \"k\")\n plt.plot(X_list_earth, Y_list_earth, \"blue\")\n 
plt.plot(X_list_moon, Y_list_moon, \"grey\")\n plt.xlim(xmin, xmax)\n plt.ylim(ymin, ymax)\n plt.gca().set_aspect(\"equal\", adjustable=\"box\")\n plt.xlabel(\"x-position (arbitrary units)\")\n plt.ylabel(\"y-position (arbitrary units)\")\n plt.savefig(\n OUTPUT_DIR + \"{}-Y(X)_inertial.{}\".format(MODE_NAME, FORMAT),\n bbox_inches=\"tight\",\n )\n\n # Position plot (x,y)\n plt.figure()\n plt.plot(xs1, ys1, \"r-\")\n plt.plot(xs2, ys2, \"k-\")\n earth = plt.Circle((EARTH_POSITION_X, 0), EARTH_RADIUS / UNIT_LENGTH, color=\"blue\")\n earthorbit1 = plt.Circle(\n (EARTH_POSITION_X, 0),\n (LEO_RADIUS - ORBITAL_TOLERANCE) / UNIT_LENGTH,\n color=\"g\",\n fill=False,\n )\n earthorbit2 = plt.Circle(\n (EARTH_POSITION_X, 0),\n (LEO_RADIUS + ORBITAL_TOLERANCE) / UNIT_LENGTH,\n color=\"g\",\n fill=False,\n )\n moon = plt.Circle((LUNAR_POSITION_X, 0), LUNAR_RADIUS / UNIT_LENGTH, color=\"grey\")\n moonorbit1 = plt.Circle(\n (LUNAR_POSITION_X, 0),\n (LLO_RADIUS - ORBITAL_TOLERANCE) / UNIT_LENGTH,\n color=\"g\",\n fill=False,\n )\n moonorbit2 = plt.Circle(\n (LUNAR_POSITION_X, 0),\n (LLO_RADIUS + ORBITAL_TOLERANCE) / UNIT_LENGTH,\n color=\"g\",\n fill=False,\n )\n plt.gcf().gca().add_artist(earth)\n plt.gcf().gca().add_artist(earthorbit1)\n plt.gcf().gca().add_artist(earthorbit2)\n plt.gcf().gca().add_artist(moon)\n plt.gcf().gca().add_artist(moonorbit1)\n plt.gcf().gca().add_artist(moonorbit2)\n plt.plot(L1_POSITION_X, 0, \"gx\")\n plt.xlim(xmin, xmax)\n plt.ylim(ymin, ymax)\n plt.gca().set_aspect(\"equal\", adjustable=\"box\")\n plt.xlabel(\"x-position (arbitrary units)\")\n plt.ylabel(\"y-position (arbitrary units)\")\n plt.savefig(\n OUTPUT_DIR + \"{}-y(x)_corotating.{}\".format(MODE_NAME, FORMAT),\n bbox_inches=\"tight\",\n )\n # plt.savefig('r3b/r3b_y(x)_euler_symplectic.{}',MODE_NAME, FORMAT='tight')\n # plt.show()\n plt.close()\n print(\"# --- Done with PLOTS\")\n\n # # #################### PLOTS: VELOCITY ####################\n\n # plt.figure()\n # plt.plot(ts, omegalist_e)\n # plt.xlabel(\"time (arbitrary units)\")\n # plt.ylabel(\"velocity (arbitrary units)\")\n # plt.savefig('r3b/r3b_omega(t)_euler_explicit.{}')\n # # plt.MODE_NAME, FORMATow()\n # plt.close()\n\n # #################### PHASE-SPACE TRAJECTORY PLOTS ####################\n\n # # Explicit Euler phase-space trajectory\n # plt.figure()\n # plt.plot(thetalist_e[:len(thetalist_e)/2], omegalist_e[:len(omegalist_e)/2], 'r')\n # plt.plot(thetalist_e[len(thetalist_e)/2:], omegalist_e[len(omegalist_e)/2:], 'b')\n # plt.xlabel(\"position (arbitrary units)\")\n # plt.ylabel(\"velocity (arbitrary units)\")\n # plt.savefig('r3b/r3b_phase-space_euler_explicit.{}',MODE_NAME, FORMAT='tight')\n # #plt.show()\n # plt.close()\n\n # # Implicit Euler phase-space trajectory\n # plt.figure()\n # plt.plot(thetalist_i[:len(thetalist_i)/2], omegalist_i[:len(omegalist_i)/2], 'r')\n # plt.plot(thetalist_i[len(thetalist_i)/2:], omegalist_i[len(omegalist_i)/2:], 'b')\n # plt.xlabel(\"position (arbitrary units)\")\n # plt.ylabel(\"velocity (arbitrary units)\")\n # plt.savefig('r3b/r3b_phase-space_euler_implicit.{}',MODE_NAME, FORMAT='tight')\n # #plt.show()\n # plt.close()\n\n # # Symplectic Euler phase-space trajectory\n # plt.figure()\n # plt.plot(thetalist[:len(thetalist)/2], omegalist[:len(omegalist)/2], 'r')\n # plt.plot(thetalist[len(thetalist)/2:], omegalist[len(omegalist)/2:], 'b')\n # plt.xlabel(\"position (arbitrary units)\")\n # plt.ylabel(\"velocity (arbitrary units)\")\n # plt.savefig('r3b/r3b_phase-space_euler_symplectic.{}',MODE_NAME, 
FORMAT='tight')\n # #plt.show()\n # plt.close()\n\n # print(\"--- Done with PHASE-SPACE TRAJECTORY PLOTS\")\n\n sys.stdout = old_stdout\n log_file.close()\n\n\nif __name__ == \"__main__\":\n run_test()\n" }, { "alpha_fraction": 0.5801447629928589, "alphanum_fraction": 0.608066201210022, "avg_line_length": 23.794872283935547, "blob_id": "86d3e5d9c805c0c44b30d06a413bae8036531b88", "content_id": "6cc15dd9e2e28822d1fde4400e3fccf2ff8952dc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 967, "license_type": "permissive", "max_line_length": 59, "num_lines": 39, "path": "/docker/examples/_ex1.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "def run_example1(impulses=4):\n import pykep as pk\n import pygmo as pg\n import numpy as np\n from matplotlib import pyplot as plt\n from pykep.examples import add_gradient, algo_factory\n\n # problem\n udp = add_gradient(pk.trajopt.pl2pl_N_impulses(\n start=pk.planet.jpl_lp('earth'),\n target=pk.planet.jpl_lp('venus'),\n N_max=impulses,\n tof=[100., 1000.],\n vinf=[0., 4],\n phase_free=False,\n multi_objective=False,\n t0=[pk.epoch(0), pk.epoch(1000)]), with_grad=False)\n prob = pg.problem(udp)\n\n # algorithm\n uda = pg.cmaes(gen=1000)\n algo = pg.algorithm(uda)\n algo.set_verbosity(10)\n\n # population\n pop = pg.population(prob, 20)\n\n # solve the problem\n pop = algo.evolve(pop)\n\n # inspect the solution\n udp.udp_inner.plot_trajectory(pop.champion_x)\n plt.ion()\n plt.show()\n\n udp.udp_inner.pretty(pop.champion_x)\n\nif __name__ == \"__main__\":\n run_example1()\n" }, { "alpha_fraction": 0.782608687877655, "alphanum_fraction": 0.782608687877655, "avg_line_length": 29.66666603088379, "blob_id": "0d98dfb612885997dfdccf97cdd5a2059b9ec8ca", "content_id": "dca9bae3ca7f1992280a7d8f53327727c301b618", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 92, "license_type": "permissive", "max_line_length": 46, "num_lines": 3, "path": "/code/setup.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\n\nsetup(name=\"orbsim\", packages=find_packages())\n" }, { "alpha_fraction": 0.5910828113555908, "alphanum_fraction": 0.5949044823646545, "avg_line_length": 27, "blob_id": "2520f302894422b4a6e09e7b3dd7595a5f8d1a3e", "content_id": "a872ad9066c18fb761dfedf824e6ac56ed5aeddc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1570, "license_type": "permissive", "max_line_length": 85, "num_lines": 56, "path": "/code/pyscripts/computepspace.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\nfrom matplotlib.colors import Normalize\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nimport pandas as pd\n\nimport numpy as np\nfrom math import pi, log\nfrom scipy.stats import rankdata\nfrom argparse import ArgumentParser\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"fp\", type=str)\n parser.add_argument(\n \"bounds\", type=float, nargs=4, help=\"lowerbound x, upperbound x, lb y, ub y\"\n )\n args = parser.parse_args()\n filepath = args.fp\n dims = args.bounds\n\n # === setup problem space, either real or Karpathy toy problem for validation ===\n # pspace = np.loadtxt(\"golf_course_zoom_s1024.txt\")\n pspace = np.loadtxt(filepath)\n # uncomment this line if you want 
smooth toy-problem\n    # pspace = G\n    print(dims)\n    lbp, ubp, lbb, ubb = dims\n\n    # ******************** PLOTTING ****************************************\n    # ======== establish figs =================\n    fig = plt.figure()\n    ax = fig.gca()\n\n    # ============= plot problem space bg images ====\n    cmap = plt.cm.viridis\n    colors = Normalize(min(pspace.flatten()), max(pspace.flatten()))(pspace)\n    colors = cmap(colors)\n    plt.axis('equal')\n    plt.imshow(\n        colors,\n        vmin=min(pspace.flatten()),\n        vmax=max(pspace.flatten()),\n        extent=[lbb, ubb,lbp, ubp],\n        aspect=\"auto\",\n        interpolation=\"none\",\n        origin=\"lower\",\n    )\n    ax.set_xlabel(\"burnDv\")\n    ax.set_ylabel(\"position\")\n\n\n    plt.colorbar()\n\n    plt.show()\n\n" }, { "alpha_fraction": 0.5910749970069885, "alphanum_fraction": 0.6372548937797546, "avg_line_length": 19.399999618530273, "blob_id": "7812a90face47cc045e32b6bf8ed433cd57f38", "content_id": "4030aa722aeebcf10297bd279c56532454fb6dab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 967, "license_type": "permissive", "max_line_length": 59, "num_lines": 39, "path": "/docker/examples/_ex1.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "def run_example1(impulses=4):\n    import pykep as pk\n    import pygmo as pg\n    import numpy as np\n    from matplotlib import pyplot as plt\n    from pykep.examples import add_gradient, algo_factory\n\n    # problem\n    udp = add_gradient(pk.trajopt.pl2pl_N_impulses(\n        start=pk.planet.jpl_lp('earth'),\n        target=pk.planet.jpl_lp('venus'),\n        N_max=impulses,\n        tof=[100., 1000.],\n        vinf=[0., 4],\n        phase_free=False,\n        multi_objective=False,\n        t0=[pk.epoch(0), pk.epoch(1000)]), with_grad=False)\n    prob = pg.problem(udp)\n\n    # algorithm\n    uda = pg.cmaes(gen=1000)\n    algo = pg.algorithm(uda)\n    algo.set_verbosity(10)\n\n    # population\n    pop = pg.population(prob, 20)\n\n    # solve the problem\n    pop = algo.evolve(pop)\n\n    # inspect the solution\n    udp.udp_inner.plot_trajectory(pop.champion_x)\n    plt.ion()\n    plt.show()\n\n    udp.udp_inner.pretty(pop.champion_x)\n\nif __name__ == \"__main__\":\n    run_example1()\n" }, { "alpha_fraction": 0.704827606678009, "alphanum_fraction": 0.7462068796157837, "avg_line_length": 26.923076629638672, "blob_id": "93755e935ed334596091817f1465c102f0fd4a1b", "content_id": "141be0c9cefe78b7168355e3f84bd42a598e6768", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 725, "license_type": "permissive", "max_line_length": 66, "num_lines": 26, "path": "/docker/Dockerfile", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "# Use an official Python runtime as a parent image\nFROM python:3.6\n\n# Set the working directory to /app\nWORKDIR /app\n\n# Copy the current directory contents into the container at /app\nADD . /app\n\n# Install any needed packages specified in requirements.txt\nRUN pip install --trusted-host pypi.python.org -r requirements.txt\n# RUN apt-get install g++\n# RUN wget https://cmake.org/files/v3.11/cmake-3.11.0.tar.gz\n# RUN tar -xzf cmake-3.11.0.tar.gz\n# RUN cmake-3.11.0/bootstrap\n# RUN cmake-3.11.0/make\n# RUN cmake-3.11.0/make install\n\n# Make port 80 available to the world outside this container\nEXPOSE 80\n\n# Define environment variable\nENV NAME World\n\n# Run app.py when the container launches\n# CMD [\"python\", \"examples/_ex2.py\"]" }, { "alpha_fraction": 0.875, "alphanum_fraction": 0.875, "avg_line_length": 8.5, "blob_id": "1d702f7e4cad55151b59aefba37f2272d0b62f7f", "content_id": "558dddea8546717c431b7f773f60b307a86d850c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 56, "license_type": "permissive", "max_line_length": 21, "num_lines": 6, "path": "/docker/requirements.txt", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "pyKep\npygmo\npygmo_plugins_nonfree\nscipy\nnumpy\nmatplotlib" }, { "alpha_fraction": 0.7516708970069885, "alphanum_fraction": 0.7623644471168518, "avg_line_length": 57.54782485961914, "blob_id": "654ab79972b3a3a27754759b2381dbe79d2ba01c", "content_id": "afc17c9a67d8da01e9f4a2e5b96a082d7dea591c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6799, "license_type": "permissive", "max_line_length": 608, "num_lines": 115, "path": "/notes/poul-questions/poul-questions.md", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "---\ntypora-copy-images-to: ./assets\nheader-includes: \\usepackage{xcolor}\nurlcolor: blue\n---\n\n# Questions on mechanics for Poul\n\n## Expression for L1 and its relation to the orbital period\n\nOn Wikipedia [it is stated](https://en.wikipedia.org/wiki/Lagrangian_point#L1) that L1 can be expressed as follows:\n\n> This distance can be described as being such that the orbital period, corresponding to a circular orbit with this distance as radius around M2 in the absence of M1, is that of M2 around M1, divided by $\\sqrt{3} \\approx 1.73$:\n\n$$\nT_{s,M_2}(r) = \\frac{T_{M_2,M_1}(R)}{\\sqrt{3}}\n$$\n\nI have tried to confirm this expression below by substituting the distance $L_1 = R \\left(\\frac{m}{3M}\\right)^{1/3}$ for R in the orbital-period formula $T = \\dfrac{2\\pi}{\\sqrt{G M}} R^{3/2}$, but it does not quite work out for me. See the calculation below:\n![l1-t](assets/l1-t.JPG){ width=100% }\n\n#### Poul answers\n\n\\leavevmode {\\color{red}\n\nI will look a bit more deeply into the expressions for the Lagrange points; we can go over it later.\n\n}\n
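\n*(Added check.)* The $\\sqrt{3}$ relation does work out if the small mass $m$ is used in the period formula for the orbit around M2; writing $r_1 = R\\left(\\frac{m}{3M}\\right)^{1/3}$ for the distance of L1 from M2, the $m$ cancels against the $m$ in $r_1^3$:\n\n$$\nT_{s,M_2}(r_1) = \\frac{2\\pi}{\\sqrt{G m}}\\, r_1^{3/2}, \\qquad r_1^3 = R^3\\,\\frac{m}{3M}\n\\quad\\Longrightarrow\\quad\nT_{s,M_2}(r_1) = 2\\pi\\sqrt{\\frac{R^3\\, m}{3 G M\\, m}} = \\frac{2\\pi}{\\sqrt{3}}\\sqrt{\\frac{R^3}{G M}} = \\frac{T_{M_2,M_1}(R)}{\\sqrt{3}}\n$$\n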
M1, is that of M2 around M1, divided by $\\sqrt{3} \\approx 1.73$:\n\n$$\nT_{s,M_2}(r) = \\frac{T_{M_2,M_1}(R)}{\\sqrt{3}}\n$$\n\nI have tried to confirm this expression below by inserting the distance $L_1 = R \\left(\\frac{m}{3M}\\right)^{1/3}$ for R in the orbital-period formula $T = \\dfrac{2\\pi}{\\sqrt{G M}} R^{3/2}$, but it does not quite work out. See the calculation below:\n![l1-t](assets/l1-t.JPG){ width=100% }\n\n#### Poul answers\n\n\\leavevmode {\\color{red}\n\nI will look a bit more closely at the expressions for the Lagrange points; we can go over it later.\n\n}\n\n## Jacobi integral\n\nWhat is [it](https://en.wikipedia.org/wiki/Jacobi_integral), and why have I not come across it before now?\n\nIn particular the sentence:\n> ...so the energy measured in this system of reference (and hence, the Jacobi integral) is a constant of motion.\"\n\nAt first glance I also cannot quite recognize the equations, which puzzles me a little as well.\n\nSee also this [figure caption](https://en.wikipedia.org/wiki/Zero-velocity_surface#/media/File:Circular_restricted_3-body_problem.png) on Wikipedia:\n\n> A trajectory (red) in the planar circular restricted 3-body problem that orbits the heavier body a number of times before escaping into an orbit around the lighter body. The contours denote values of the Jacobi integral. The dark blue region is an excluded region for the trajectory, enclosed by a zero-velocity surface that cannot be crossed.\n\n#### Poul answers\n\n\\leavevmode {\\color{red}\n\nThe Jacobi integral is in general a constant of motion, which can possibly be derived from Noether's theorem. One can show that it is a constant of motion using the Poisson bracket:\n\n$\\{f,g\\} = \\sum\\limits_{i=1}^{n} \\left(\\frac{\\partial f}{\\partial q_i} \\frac{\\partial g}{\\partial p_i} - \\frac{\\partial f}{\\partial p_i}\\frac{\\partial g}{\\partial q_i} \\right)$\n\nwhere $q_i$ and $p_i$ are the generalized coordinates and momenta from Hamiltonian mechanics.\n\n}\n\n## L4 and L5 intuition?\n\nWikipedia [explains](https://en.wikipedia.org/wiki/Lagrangian_point#L4_and_L5) the intuition behind L4 and L5. It is claimed that L4 and L5 lie where the lines between M1, M2 and L4/L5 form an equilateral triangle. But if that were the case, would L4/L5 not lie ON M2's circular orbit (assuming it is circular)? When L4 and L5 in [this figure](https://en.wikipedia.org/wiki/Lagrangian_point#/media/File:Lagrange_points2.svg) appear to lie outside Earth's orbit, is that only because Earth's orbit is drawn as elliptical, or are they meant to lie outside even when M2 is in a circular orbit around M1?\n\n#### Poul answers\n\n\\leavevmode {\\color{red}\nThe Earth's pull adds a small component directed in toward the Sun / the CM, so there is slightly more centripetal force than there would be without the Earth; L4 and L5 therefore lie slightly farther out so that the extra centrifugal force balances the extra pull.\n}\n\n## Coriolis force: conservative or not?\nIn the article on the [Roche lobe](https://en.wikipedia.org/wiki/Roche_lobe) it says:\n> (...) the Coriolis force is a non-conservative force (i.e. not representable by a scalar potential).\n\nBut conversely one could also say that the Coriolis force always acts perpendicular to the velocity vector and therefore can do no work. I had always assumed that all the various properties of conservative forces follow once any one of them holds.\n\nIs the Coriolis force conservative or not?\n\n#### Poul answers\n\n\\leavevmode {\\color{red}\n\n$F = \\nabla f$ is the strict criterion with respect to all the properties of a conservative force, as illustrated here:}
\n\n![Pasted Graphic](assets/Pasted Graphic.tiff)\n\n# Discussion points / Information\n\n## Mars LETO infeasibility?\n\n### Negative expectations\n> Due to the long time needed to achieve the low energy transfers between planets, the Interplanetary Superhighway is impractical for transfers such as from Earth to Mars at present.\n- http://www2.esm.vt.edu/~sdross/superhighway/description.html\n\nI think this was written some time ago, though, and I have other sources suggesting there are promising possibilities; see the next point.\n\nLikewise, [Wikipedia](https://en.wikipedia.org/wiki/Interplanetary_Transport_Network#Paths) states the following, albeit without a citation:\n\n> With careful calculation, one can pick which outbound path one wants. This turned out to be useful, as many of these paths lead to some interesting points in space, such as the Earth's Moon or between the Galilean moons of Jupiter.[8] As a result, for the cost of reaching the Earth–Sun L2 point, which is rather low energy value, one can travel to a number of very interesting points for a little or no additional fuel cost. **But the trip from Earth to Mars or other distant location would likely take thousands of years.**\n\nThis also suggests that for Mars we will probably have to incorporate bi-directional search through Lagrange points, and hence an extra burn at the Lagrange points?\n\n### L4/L5 to Mars?\n\nHowever, [this](https://en.wikipedia.org/wiki/Orbital_mechanics#Interplanetary_Transport_Network_and_fuzzy_orbits) article is a bit more optimistic (though it carries a \"citation needed\", so it is somewhat uncertain):\n\n> It is now possible to use computers to search for routes using the nonlinearities in the gravity of the planets and moons of the Solar System. For example, it is possible to plot an orbit from high earth orbit to Mars, passing close to one of the Earth's Trojan points.[citation needed]\n\nThis could indicate that we may have to go out past the Sun-Earth system's L4 and/or L5 points.\n\n(In [astronomy](https://en.wikipedia.org/wiki/Trojan_(astronomy)), L4 is called the \"greek camp\" and L5 the \"trojan camp.\")\n\n### LETO to Deimos & Phobos\nIn the article on [LETO](https://en.wikipedia.org/wiki/Low-energy_transfer#Delta-v_savings), a study of LETO to Mars' moons is cited, with fine results:\n\n> For rendezvous with the Martian moons, the savings are 12% for Phobos and 20% for Deimos. 
Rendezvous is targeted because the stable pseudo-orbits around the Martian moons do not spend much time within 10 km of the surface.[9]\n\nJeg kiggede i deres abstract, og de påstar: \"WSB transfer lasts ~90 days\" - jeg vil tro at disse 3 måneder er oven i de ~7-9 måneder det tager at flyve til Mars via Hohmann.\n\n### Mini konklusion\nDet virker alt i alt som om at LETO til Mars ikke er noget vi skal opgive på forhånd umiddelbart\n" }, { "alpha_fraction": 0.8679245114326477, "alphanum_fraction": 0.8679245114326477, "avg_line_length": 5.75, "blob_id": "53855df6573fb439b79eab6bc8c746171715c91a", "content_id": "bd40cc319d4c14b5b0526b5fbdbb00372dfe97de", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 53, "license_type": "permissive", "max_line_length": 10, "num_lines": 8, "path": "/requirements.txt", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "pyKep\npygmo\nmatplotlib\nnumpy\nscipy\npytest\nblack\nnumba" }, { "alpha_fraction": 0.5059945583343506, "alphanum_fraction": 0.5613079071044922, "avg_line_length": 24.83098602294922, "blob_id": "ddba373bcb7acb21a7caccaefcf5d552b4c933f9", "content_id": "3a6afb334e8789054bfd823975e614b3230691ab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3670, "license_type": "permissive", "max_line_length": 87, "num_lines": 142, "path": "/code/pyscripts/pygmo_rocketry.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[5]:\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom IPython import display\nimport numpy as np\n\n# %matplotlib inline\nimport pygmo as pg\nfrom pygmo import algorithm\nimport os\nimport sys\nimport json\nfrom orbsim.r3b_2d import UNIT_TIME\nfrom orbsim.r3b_2d.analyticals import *\nfrom orbsim.r3b_2d.simulators import run_sim\nfrom orbsim.plotting import orbitplot2d, orbitplot_non_inertial\nimport time\nfrom random import shuffle\nfrom numba import jit\nimport math\nfrom math import pi\n\npi8 = pi / 8\npi4 = pi / 4\npi2 = pi / 2\n\n\n# In[6]:\n\n\nclass saddle_space:\n def __init__(self):\n self.dim = 3\n\n def fitness(self, psi):\n res, _ = run_sim(psi, duration=50 / UNIT_TIME, max_iter=1e7)\n return [-res]\n\n @jit\n def get_bounds(self):\n return ([-pi, -pi8, 2], [pi2, pi4, 4])\n\n def get_name(self):\n return f\"saddlespace\"\n\n def plot(self, w, idx):\n pass\n\n\n# In[7]:\n\n\nclass salimans_nes:\n def __init__(self, iter=10):\n super(salimans_nes, self).__init__()\n\n self.iter = iter # number of steps towards estimated gradient\n\n def evolve(self, pop):\n if len(pop) == 0:\n return pop\n sigma = 0.001\n alpha = 0.003 # learningrate\n\n # for each iteration, jitter around starting points, and move in the\n # best direction (weighted average jitter coordinates according to\n # fitness score)\n for i in range(self.iter):\n\n # get the population\n wl = pop.get_x()\n\n # do the jittering and selection\n j = 0\n for w in wl:\n # print(f\"mutating {str(w)}\")\n noise = np.random.randn(10, 3)\n wp = [[x, y, z] for [x, y, z] in np.expand_dims(w, 0) + sigma * noise]\n # print(np.expand_dims(w, 0) + sigma * noise)\n\n R = np.array([-run_sim(wi, max_iter=1e7)[0] for wi in wp])\n R -= R.mean()\n R /= R.std()\n g = np.dot(R, noise)\n # print(f\"R = {R}, g = {g}\")\n u = alpha * g\n print(f\"new individual = {str(u)}\")\n w += u # mutate the population/take the step\n\n pop.set_x(j, w) # make the move previously 
selected\n j += 1\n return pop\n\n def get_name(self):\n return f\"Salimans_NES\"\n\n\n# In[13]:\n\n\ndef pygmo_es():\n ARCHIPELAGO = True\n iterations = 10\n\n uda = salimans_nes(iter=iterations) # user defined algorithm\n udp = saddle_space() # user defined problem\n prob = pg.problem(udp) # Beautiful white snow\n\n if ARCHIPELAGO:\n archi = pg.archipelago(algo=uda, prob=prob, n=1, pop_size=4)\n archi.evolve()\n # print(archi) # prints description of islands contained in archipelago\n # archi.wait()\n sols = archi.get_champions_f()\n idx = sols.index(min(sols))\n sols_x = archi.get_champions_x()\n sol = sols[idx], sols_x[idx]\n else:\n pop = pg.population(prob=prob, size=4)\n pop.set_x(0, [-2.277654673852600, 0.047996554429844, 3.810000000000000])\n pop.set_x(1, [-0.138042744751570, -0.144259374836607, 3.127288444444444])\n pop.set_x(2, [-2.086814820119193, -0.000122173047640, 3.111181716545691])\n uda.evolve(pop)\n sol = (pop.champion_f, pop.champion_x)\n\n print(\"Done! best solution is:\")\n print(sol)\n return sol\n\n\n# In[ ]:\n\n\nif __name__ == \"__main__\":\n Dv, psi = pygmo_es()\n path = run_sim(psi)\n orbitplot2d(path, psi)\n orbitplot_non_inertial(path, psi)\n\n" }, { "alpha_fraction": 0.4551074504852295, "alphanum_fraction": 0.569325864315033, "avg_line_length": 35.52688217163086, "blob_id": "21882242c1c1c8946c538395e5130400e5d95322", "content_id": "dc79cc20a1b6349aaa8551e4ab1fc28ea0c65a83", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3397, "license_type": "permissive", "max_line_length": 99, "num_lines": 93, "path": "/code/run_sim.py", "repo_name": "gandalfsaxe/letomes", "src_encoding": "UTF-8", "text": "import time\n\nfrom orbsim.r3b_2d import *\nfrom orbsim.r3b_2d.plotting import orbitplot2d, orbitplot_non_inertial, leo_plot\nfrom orbsim.r3b_2d.simulators import run_sim\nfrom argparse import ArgumentParser\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\n \"-p\",\n metavar=\"paths\",\n type=str,\n nargs=\"+\",\n help='the premade paths you want to check. 
options are \"leo, llo, h, rh, ls, ll, 3h, 1h\".',\n )\n parser.add_argument(\n \"--psi\",\n metavar=\"in_psi\",\n type=float,\n nargs=\"+\",\n help=\"the position, angle and burn magnitude you want to simulate\",\n )\n args = parser.parse_args()\n paths = args.p\n in_psi = args.psi\n # for x in range(10):\n # psi = [rand() *2*pi, rand() * 2* pi, rand() * 4 / unit_velocity]\n # path = launch_sim(psi)\n # orbitplot2d(path, psi)\n # starttime=time.time()\n # psi = [-2.282942228154665, 0.0000, -31.49483130653266 / unit_velocity]\n # launch_sim(psi,max_iter=1000000)\n # print(round(time.time() - starttime, 3))\n\n starttime = time.time()\n if in_psi is not None and len(in_psi) == 3:\n # user defined\n psi = in_psi\n path = run_sim(psi, duration=100)\n orbitplot2d(path, psi, title=\"userdef\")\n orbitplot_non_inertial(path, psi, title=\"userdef\")\n if paths is not None:\n if \"leo\" in paths:\n # leo\n psi = [0.0, 0.0, 0.0]\n path = run_sim(psi, duration=0.0625)\n leo_plot(path, psi, title=\"leo\")\n\n if \"h\" in paths:\n # hohmann\n psi = [-2.086814820119193, -0.000122173047640, 3.111181716545691]\n path = run_sim(psi, duration=5)\n orbitplot2d(path, psi, title=\"hohmann\")\n orbitplot_non_inertial(path, psi, title=\"hohmann\")\n\n if \"rh\" in paths:\n # reverse hohmann\n psi = [-2.282942228154665, 0.000000000000000, -3.149483130653266]\n path = run_sim(psi, duration=1)\n orbitplot2d(path, psi, title=\"reverse hohmann\")\n orbitplot_non_inertial(path, psi, title=\"reverse_hohmann\")\n\n if \"ll\" in paths:\n # low energy long\n psi = [3.794182930145708, 0.023901745288554, 3.090702702702703]\n path = run_sim(psi, duration=200)\n orbitplot2d(path, psi, title=\"LE long\")\n orbitplot_non_inertial(path, psi, title=\"LE long\")\n\n if \"ls\" in paths:\n # low energy short\n psi = [-0.138042744751570, -0.144259374836607, 3.127288444444444]\n path = run_sim(psi, duration=41)\n orbitplot2d(path, psi, title=\"LE short\")\n orbitplot_non_inertial(path, psi, title=\"LE short\")\n\n if \"3h\" in paths:\n # 3-day-hohmann\n psi = [-2.272183066647597, -0.075821466029764, 3.135519748743719]\n path = run_sim(psi, duration=3)\n orbitplot2d(path, psi, title=\"3-day-hohmann\")\n orbitplot_non_inertial(path, psi, title=\"3-day-hohmann\")\n\n if \"1h\" in paths:\n # 1-day-hohmann\n psi = [-2.277654673852600, 0.047996554429844, 3.810000000000000]\n path = run_sim(psi, duration=1)\n orbitplot2d(path, psi, title=\"1-day-hohmann\")\n orbitplot_non_inertial(path, psi, title=\"1-day-hohmann\")\n\n print(round(time.time() - starttime, 3))\n" } ]
72
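The `salimans_nes.evolve` loop in `pygmo_rocketry.py` above estimates a search gradient by jittering each candidate with Gaussian noise, standardizing the resulting fitness values, and stepping along the fitness-weighted noise. Below is a minimal standalone sketch of that update, assuming a toy quadratic objective in place of `run_sim`; the population size, `sigma`, and `alpha` mirror the snippet, everything else is illustrative:

```python
import numpy as np

TARGET = np.array([0.5, 0.1, 3.0])  # hypothetical optimum, stands in for run_sim

def toy_fitness(w):
    # Stand-in objective: higher is better, peaks at TARGET.
    return -np.sum((w - TARGET) ** 2)

def nes_step(w, sigma=0.001, alpha=0.003, npop=10):
    # One Salimans-style NES update, mirroring the evolve() loop above.
    noise = np.random.randn(npop, len(w))    # jitter directions
    candidates = w + sigma * noise           # perturbed copies of the individual
    returns = np.array([toy_fitness(c) for c in candidates])
    returns = (returns - returns.mean()) / returns.std()  # standardize fitness
    grad = np.dot(returns, noise)            # fitness-weighted average direction
    return w + alpha * grad                  # take the estimated gradient step

w = np.array([0.0, 0.0, 2.5])
for _ in range(500):
    w = nes_step(w)
print(w)  # should have drifted toward TARGET
```

As in the snippet, the step is `alpha * dot(R, noise)` with no `1/(npop * sigma)` normalization, so the learning rate absorbs that scale.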
Mohan-Palat/mohan-learn-python
https://github.com/Mohan-Palat/mohan-learn-python
16fe1d9ce5066ac1beb3a275ed730d3bb50289f8
473ff343b0b8d4b9f5ded763895bc049a57d013a
da3f19f6ba52ffc3cb79f8ee6c92fa52b353153c
refs/heads/main
2023-01-23T23:25:29.162355
2020-11-26T01:44:01
2020-11-26T01:44:01
316,091,218
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.44306930899620056, "alphanum_fraction": 0.5074257254600525, "avg_line_length": 18.100000381469727, "blob_id": "7676bd9be4a2f57b6cef516915454a915240c3d8", "content_id": "88f65aae03541f051a54f7845183b095f6e86d13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 83, "num_lines": 20, "path": "/0004_Repetition_Operator.py", "repo_name": "Mohan-Palat/mohan-learn-python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\ni = 25\r\ns = 15 * '0'\r\n\r\nprint '015 Decimal = %015d' % i\r\n\r\nprint 'Repeat 0 15 = %s' % s\r\n\r\ns += str(i)\r\nprint 's += i = %s' % s\r\n\r\nprint 'Slice Only = %s' % s[-15:] \r\n\r\nprint 'Slice Repet = %s' % (15 * '0' + str(i))[-15:]\r\n\r\na = (15 * '0' + str(i))[-15:]\r\nprint 'Assigned = ' + a\r\n\r\n# In this example, printf is simpler, but for assigning we need the Slice Operation\r\n\r\n" }, { "alpha_fraction": 0.4438437521457672, "alphanum_fraction": 0.45605161786079407, "avg_line_length": 44.41295623779297, "blob_id": "af2a3b4006e19e2e7a31cab8217cde5c3a3b47e7", "content_id": "a96e6b276ddb5a4bb391b422db57fdbbf57fb916", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11468, "license_type": "no_license", "max_line_length": 134, "num_lines": 247, "path": "/0013_Load_Perf_HistOo.py", "repo_name": "Mohan-Palat/mohan-learn-python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\n# +-----------------------------------------------------------------------------------------+\r\n# | U\\ 2018-03-12 |\r\n# | Load_Perf_Oo.py (Oo - Object Oriented Version - Closer to C++) Mohan Palat |\r\n# | |\r\n# | Purpose: |\r\n# | Markit provided 121 Performance text files, with header and trailer, zipped. |\r\n# | We need to a) Unzip b) strip header/trailer c) load each file into stage |\r\n# | Once all 121 files are loaded into the stage table, the regular stage to core DataStage |\r\n# | process is executed to load data from all 121 files into the core table |\r\n# | This Python program does the following |\r\n# | 1. Truncate the stage table |\r\n# | 2. Foreach Zip from the Cyberfusion Folder |\r\n# | Unzip |\r\n# | Remove header trailer (Collect the footer record count to reconcile at end) |\r\n# | Load into stage table |\r\n# | Remove the large intermediate files created |\r\n# | 3. Total up the footer record counts for reconciling with the loaded rec count |\r\n# | For every command executed, throws an exception if the command did not work. |\r\n# | |\r\n# | Parameter: |\r\n# | A number N which controls the number of files out of all the files located |\r\n# | Defaults to 1. 
If you pass a higher number, it does not try beyond the files found |\r\n# | |\r\n# | Log: |\r\n# | Writes to stdout, Use thusly to write into a log file |\r\n# | Load_Perf_Oo.py 121 | tee Load_Perf_2018-03-12-03-00-PM.log |\r\n# | |\r\n# | Modifications: |\r\n# | |\r\n# | Who Date Change |\r\n# | ----------- -------- ------------------------------------------------------------------ |\r\n# | Mohan Palat 20180312 Initial Revision |\r\n# +-----------------------------------------------------------------------------------------+\r\n\r\nimport sys\r\nimport datetime\r\nimport os\r\n\r\nclass LoadPerformance(object):\r\n \"\"\" Class to load a Performance Zip File From Markit into IODS \"\"\"\r\n\r\n ppbin = '/usr/local/dstage/INVODS/batch/PreProcHeaderTrailer '\r\n cybfldr = '/usr/local/dstage/INVODS/cyberf/MARKIT/'\r\n stgfldr = '/usr/local/dstage/INVODS/cyberf/stage/MARKIT/'\r\n wrkfldr = '/usr/local/dstage/INVODS/WorkFiles/'\r\n dsshell = 'sh /usr/local/dstage/INVODS/batch/INVODS_generic.sh '\r\n dbprop = '/usr/local/dstage/INVODS/batch/IODSDB.prop'\r\n stgtbl = ' MOHAN.T_STAGE_PERFORMANCE_HISTORY '\r\n\r\n def __init__(self, numzips=1):\r\n \"\"\" Initialize with Number of Zips from available list for the object \"\"\"\r\n print '\\n\\n<INI>\\n%s\\n%s\\n%s\\n\\n' % ('=' * 26, datetime.datetime.now(), '=' * 26)\r\n self.numzips = numzips\r\n print \"Number of Performance zip files to be preocessed %d\" % self.numzips\r\n self.i = 0\r\n self.tf = 0\r\n sys.stdout.flush()\r\n self.truncate_stage_table()\r\n\r\n def truncate_stage_table(self):\r\n \"\"\" Truncate Stage Table before loading the Markit Files \"\"\"\r\n self.dbdict = { 'init': 'dummy' }\r\n try:\r\n fo = open(self.dbprop, 'r')\r\n for eachline in fo:\r\n if eachline.find(\"\\n\") == -1:\r\n self.dbdict[ eachline.split('=')[0] ] = eachline.split('=')[1]\r\n else: \r\n self.dbdict[ eachline.split('=')[0] ] = eachline.split('=')[1][:-1]\r\n fo.close()\r\n except IOError, e:\r\n print 'File Processing Error with DB Control: ', e, '\\n\\n'\r\n sys.stdout.flush()\r\n sys.exit(1)\r\n # print self.dbdict\r\n # 1/3 Connect\r\n self.oscmd = 'db2 connect to ' + self.dbdict['DB_NAME'] + ' user ' + self.dbdict['DB_ID'] + ' using ' + self.dbdict['DB_PSWD']\r\n # print self.oscmd\r\n self.result = os.system(self.oscmd)\r\n print 'Result for db2 connect = ', self.result\r\n self.res = self.result != 0\r\n if self.res: # Failure\r\n print 'Unable to continue'\r\n sys.exit(1)\r\n # 2/3 Execute SQL\r\n self.oscmd = ' db2 TRUNCATE ' + self.stgtbl + ' REUSE STORAGE IMMEDIATE '\r\n # print self.oscmd\r\n self.result = os.system(self.oscmd)\r\n print 'Result for db2 truncate stage = ', self.result\r\n self.res = self.result != 0\r\n if self.res: # Failure\r\n print 'Unable to continue'\r\n sys.exit(1)\r\n # 3/3 Terminate\r\n self.oscmd = ' db2 terminate '\r\n # print self.oscmd\r\n self.result = os.system(self.oscmd)\r\n print 'Result for db2 terminate = ', self.result\r\n sys.stdout.flush()\r\n self.res = self.result != 0\r\n if self.res: # Failure\r\n print 'Unable to continue'\r\n sys.exit(1)\r\n \r\n return self.res\r\n\r\n def reinit(self):\r\n \"\"\" Reinitialize method for each file processed \"\"\"\r\n self.tf = self.i\r\n print '=' * (len(self.f)+5)\r\n print '%03d. 
%s' % (self.i, self.f)\r\n print '=' * (len(self.f)+5)\r\n sys.stdout.flush()\r\n\r\n def unzip(self):\r\n \"\"\" Unzip a zip file \"\"\"\r\n self.uzipfil = self.wrkfldr + self.f[:-4]\r\n print 'Unzipped file is', self.uzipfil\r\n self.oscmd = 'unzip -jo ' + self.cybfldr + self.f + ' -d ' + self.wrkfldr\r\n print self.oscmd\r\n self.result = os.system(self.oscmd)\r\n print 'Result for unzip = ', self.result\r\n sys.stdout.flush()\r\n self.res = self.result != 0\r\n if self.res: # Failure\r\n print 'Unable to continue'\r\n return self.res\r\n\r\n def collect_tail(self):\r\n \"\"\" Collect the record count from trailer record \"\"\"\r\n if self.i == 1:\r\n self.oscmd = 'tail -v -n1 ' + self.uzipfil + ' > ' + self.wrkfldr + 'Load_Perf_Tail.log'\r\n else:\r\n self.oscmd = 'tail -v -n1 ' + self.uzipfil + ' >> ' + self.wrkfldr + 'Load_Perf_Tail.log'\r\n self.result = os.system(self.oscmd)\r\n print 'Result for Tail = ', self.result\r\n sys.stdout.flush()\r\n self.res = self.result != 0\r\n if self.res: # Failure\r\n print 'Unable to continue'\r\n return self.res\r\n self.oscmd = 'echo >> ' + self.wrkfldr + 'Load_Perf_Tail.log'\r\n self.result = os.system(self.oscmd)\r\n print 'Result for >> Load_Perf_Tail.log', self.result\r\n sys.stdout.flush()\r\n self.res = self.result != 0\r\n if self.res: # Failure\r\n print 'Unable to continue'\r\n return self.res\r\n\r\n def strip_ht(self):\r\n \"\"\" Strip Header and Trailer \"\"\"\r\n self.stgfil = self.stgfldr + self.f[:-4]\r\n print 'Staged file is', self.stgfil\r\n self.oscmd = self.ppbin + ' -i ' + self.uzipfil + ' -o ' + self.stgfil + ' -s > /dev/null '\r\n result = os.system(self.oscmd)\r\n print 'Result for Strip HT = ', self.result\r\n sys.stdout.flush()\r\n self.res = self.result != 0\r\n if self.res: # Failure\r\n print 'Unable to continue'\r\n return self.res\r\n\r\n def call_datastage_job(self):\r\n \"\"\" Call DataStage Job to Load the Performance Stage Table \"\"\"\r\n # self.dspar1 = ' \"INVODS_010100_MARKIT_PERF_NONGIA_STG\" '\r\n # self.dspar1 = ' \"INVODS_010105_MARKIT_PERF_NONGIA_STG_NO_TRUNCATE\" '\r\n self.dspar1 = ' \"MO_INVODS_010105_MARKIT_PERF_NONGIA_STG_NO_TRUNCATE\" '\r\n self.dspar2 = '\"-param jpFileName='\r\n self.dspar2 += \"'\" + self.f[:-4] + \"'\" + '\"'\r\n self.oscmd = self.dsshell + self.dspar1 + self.dspar2\r\n self.result = os.system(self.oscmd)\r\n print 'Result for DataStage Load Stage = ', self.result\r\n # print oscmd\r\n sys.stdout.flush()\r\n self.res = self.result != 0\r\n if self.res: # Failure\r\n print 'Unable to continue'\r\n sys.stdout.flush()\r\n return self.res\r\n\r\n def cleanup(self):\r\n \"\"\" Remove staged and unzipped file before working with next zip file \"\"\"\r\n print \"Removing \", self.uzipfil\r\n os.remove(self.uzipfil)\r\n print \"Removing \", self.stgfil\r\n os.remove(self.stgfil)\r\n print '\\n%s\\n%s\\n%s\\n' % ('=' * 26, datetime.datetime.now(), '=' * 26)\r\n sys.stdout.flush()\r\n\r\n def reconcile_reccount(self):\r\n \"\"\" Add record counts from collected trailer record to verify that all got loaded \"\"\"\r\n # from ibm_db import connect\r\n print \"\\n============ Trailer Records Log =================\\n\" \r\n sys.stdout.flush()\r\n oscmd = 'cat ' + self.wrkfldr + 'Load_Perf_Tail.log'\r\n result = os.system(oscmd)\r\n print \"\\n\"\r\n sys.stdout.flush()\r\n try:\r\n fo = open(self.wrkfldr + 'Load_Perf_Tail.log', 'r')\r\n i = 0\r\n tp = 0\r\n for eachline in fo:\r\n i += 1\r\n if i % 2 == 0:\r\n tp += int(eachline[25:-1])\r\n fo.close()\r\n print 'Total Performance Records 
Received from Markit: %d in %d files' % (tp, self.tf) \r\n sys.stdout.flush()\r\n except IOError, e:\r\n print 'File Processing Error: ', e\r\n\r\n def Go(self):\r\n \"\"\" Loop through each Zip file and process them \"\"\"\r\n l = os.listdir(self.cybfldr)\r\n l.sort();\r\n for self.f in l:\r\n if self.f.endswith('.zip'):\r\n self.i += 1\r\n if self.i <= self.numzips:\r\n self.reinit()\r\n if self.unzip():\r\n break\r\n if self.collect_tail():\r\n break\r\n if self.strip_ht():\r\n break\r\n if self.call_datastage_job():\r\n break\r\n self.cleanup()\r\n self.reconcile_reccount()\r\n\r\ndef main():\r\n \"\"\" Main (From my beloved C++) \"\"\"\r\n if len(sys.argv) > 1: \r\n lp = LoadPerformance(int(sys.argv[1]))\r\n else:\r\n lp = LoadPerformance()\r\n print '\\n\\n<BOP>\\n%s\\n%s\\n%s\\n\\n' % ('=' * 26, datetime.datetime.now(), '=' * 26)\r\n lp.Go()\r\n print '\\n\\n<EOP>\\n%s\\n%s\\n%s\\n\\n' % ('=' * 26, datetime.datetime.now(), '=' * 26)\r\n sys.exit(0)\r\n\r\nmain()\r\n\r\n\r\n" }, { "alpha_fraction": 0.6033898591995239, "alphanum_fraction": 0.6033898591995239, "avg_line_length": 18.928571701049805, "blob_id": "28f660e5f70d77dff4ee25f3bd5e720a72c2d7ac", "content_id": "51c08427f00f9a33be5fc0b26af6d9d722477988", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "no_license", "max_line_length": 53, "num_lines": 14, "path": "/0010_For_And_CodeBlocks.py", "repo_name": "Mohan-Palat/mohan-learn-python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\ntoDay = 'Monday'\r\n\r\nif toDay == 'Monday':\r\n print 'Chan will ignore us completely'\r\n print 'Can\\'t wait for the day to end'\r\nelif toDay == 'Tuesday':\r\n print 'Arg !!'\r\nelse:\r\n print 'Hello World'\r\n\r\n\r\n# Block of code is a suite - In this case an if suite\r\n\r\n" }, { "alpha_fraction": 0.5483180284500122, "alphanum_fraction": 0.5831804275512695, "avg_line_length": 39.367088317871094, "blob_id": "86da43f53af3eb7a6d6ef955df2d5d61d5afeb52", "content_id": "ce5d1b880b5bba1df6f74fd8ebdf06b870817966", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3270, "license_type": "no_license", "max_line_length": 119, "num_lines": 79, "path": "/0006_Numbers.py", "repo_name": "Mohan-Palat/mohan-learn-python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\nimport decimal\r\n\r\ni = 25\r\nl = 2000000000000000000L\r\nb = True\r\nf = 25.12745678\r\nc = 6.23 + 1.5j\r\n\r\nprint 'I = %d, L = %ld %ld' % (i, l, l + 9876543210)\r\n\r\nprint b\r\nprint \"%d\" % b \r\nb = not b\r\nprint b\r\nprint \"%d\" % b \r\n\r\nprint 'Format 5.3f %5.3f' % f\r\nprint 'Format 05.3f %05.3f' % f\r\n\r\nTWOPLACES = decimal.Decimal('0.01')\r\nd = decimal.Decimal(str(f)).quantize(TWOPLACES)\r\nprint d\r\nFIVEPLACES = decimal.Decimal('0.00001')\r\nd = decimal.Decimal(str(f)).quantize(FIVEPLACES)\r\nprint d\r\n\r\n# String Formatting\r\n# https://docs.python.org/2/library/string.html#format-string-syntax\r\npi = 3.141592653589793\r\nprint '{0:.2f}'.format(pi)\r\n\r\n# Integer\r\n# Long (As much as there is virtual memory)\r\n# Boolean (True - 1, False - 0)\r\n# Floating Point\r\n# Complex\r\n# Decimal implemented as a class for more accuracy\r\n# All numeric types (except complex) support the following operations, sorted by ascending priority \r\n# (operations in the same box have the same priority; \r\n# all numeric operations have a higher priority than comparison operations): \r\n# \r\n# Operation Result 
Notes\r\n# -------------- ---------------------------------------- -----\r\n# x + y sum of x and y \r\n# x - y difference of x and y \r\n# x * y product of x and y \r\n# x / y quotient of x and y -1\r\n# x // y (floored) quotient of x and y -5\r\n# x % y remainder of x / y -4\r\n# -x x negated \r\n# +x x unchanged \r\n# abs(x) absolute value or magnitude of x \r\n# int(x) x converted to integer -2\r\n# long(x) x converted to long integer -2\r\n# float(x) x converted to floating point \r\n# complex(re,im) a complex number with real part re, \r\n# imaginary part im. im defaults to zero. \r\n# c.conjugate() conjugate of the complex number c \r\n# divmod(x, y) the pair (x // y, x % y) (3)(4)\r\n# pow(x, y) x to the power y \r\n# x ** y x to the power y \r\n# \r\n# Notes: \r\n# (1) For (plain or long) integer division, the result is an integer. \r\n# The result is always rounded towards minus infinity: 1/2 is 0, (-1)/2 is -1, 1/(-2) is -1, and (-1)/(-2) is 0. \r\n# Note that the result is a long integer if either operand is a long integer, regardless of the numeric value. \r\n# (2) Conversion from floating point to (long or plain) integer may round or truncate as in C; \r\n# see functions floor() and ceil() in the math module for well-defined conversions. \r\n# (3) See section 2.1, ``Built-in Functions,'' for a full description. \r\n# (4) Complex floor division operator, modulo operator, and divmod(). \r\n# Deprecated since release 2.3. Instead convert to float using abs() if appropriate.\r\n# (5) Also referred to as integer division. The resultant value is a whole integer, \r\n# though the result's type is not necessarily int. \r\n# \r\n# The decimal module was designed to support \r\n# \"without prejudice, both exact unrounded decimal arithmetic (sometimes called fixed-point arithmetic) \r\n# and rounded floating-point arithmetic.\"\r\n\r\n" }, { "alpha_fraction": 0.6009244918823242, "alphanum_fraction": 0.6271186470985413, "avg_line_length": 24.95833396911621, "blob_id": "a2ff34977b8de6c9635bfc4dadcac48a652045a8", "content_id": "f754e382baf45c67e220a1ad1a6378d8b23b5f92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 649, "license_type": "no_license", "max_line_length": 83, "num_lines": 24, "path": "/0008_Tuples_And_Slicing.py", "repo_name": "Mohan-Palat/mohan-learn-python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\naTuple = ( 0, 1, 'Two', 3, 'Four', 'V', 6 )\r\neTuple = ( 'A', 'B', 'C' )\r\n\r\nprint aTuple\r\n\r\nprint \"0th element = %s\" % aTuple[0]\r\nprint \"5th element = %s\" % aTuple[5]\r\nprint \"Last element = %s\" % aTuple[-1]\r\n\r\nprint \"2nd-4th = \", aTuple[2:5]\r\nprint \"0th-Last but one = \", aTuple[:-1]\r\n\r\nprint \"aTuple[0] = 'Zero' is illegal\"\r\n\r\nprint \"aTuple.insert(-1, 'Actual Six') is illegal\"\r\n\r\nprint \"aTuple.extend(eTuple) is illegal\"\r\n\r\n# Tuples are generic arrays like lists, but read-only\r\n# Syntax (\r\n# https://developers.google.com/edu/python\r\n# Unlike list, for tuple, all elements are not converted to string when using print\r\n\r\n" }, { "alpha_fraction": 0.671875, "alphanum_fraction": 0.6763392686843872, "avg_line_length": 19.238094329833984, "blob_id": "44fbcfc101f3d0962470cefb802f3e2b810a06bf", "content_id": "a700141214cfe5b674359dfe9573298d45b41838", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 448, "license_type": "no_license", "max_line_length": 56, "num_lines": 21, "path": 
"/0009_Dicts.py", "repo_name": "Mohan-Palat/mohan-learn-python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\naDict = { 'host': 'earth' }\r\nprint aDict\r\n\r\naDict['port'] = 80\r\nprint aDict\r\n\r\naDict['Hello'] = 'World'\r\nprint aDict\r\n\r\nprint \"print aDict[:] is illegal, Dicts are unslashable\"\r\n\r\n# Dict is short for Dictionary\r\n# Syntax {\r\n# Unslashable\r\n# Python's mapping type\r\n# Works like associative arrays or hashes of Pearl\r\n# Key Value Pairs\r\n# Key - Python type (Mostly numbers or string)\r\n# Value - Any arbitary Python Object\r\n\r\n" }, { "alpha_fraction": 0.761904776096344, "alphanum_fraction": 0.761904776096344, "avg_line_length": 20, "blob_id": "cd4ed34653d216035aa69665c9e267a30839e06c", "content_id": "8ed90dcc6bc66f962cecdcc44c38f4285ab2960f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 21, "license_type": "no_license", "max_line_length": 20, "num_lines": 1, "path": "/README.md", "repo_name": "Mohan-Palat/mohan-learn-python", "src_encoding": "UTF-8", "text": "# mohan-learn-python\n" }, { "alpha_fraction": 0.570135772228241, "alphanum_fraction": 0.6244344115257263, "avg_line_length": 29.285715103149414, "blob_id": "1be2ef6253390aa1ade09729b4e030975f823a82", "content_id": "b3286bca2c03a3de1a3e359b9de5b948f123dd10", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 78, "num_lines": 7, "path": "/0005_Input_Process_Output.py", "repo_name": "Mohan-Palat/mohan-learn-python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\nmiles = raw_input('Enter Miles: ')\r\n\r\nprint '%f miles is %f kilometers' % (float(miles), 1.609 * float(miles))\r\n\r\nprint '%7.3f miles is %7.3f kilometers' % (float(miles), 1.609 * float(miles))\r\n\r\n" }, { "alpha_fraction": 0.6296296119689941, "alphanum_fraction": 0.644444465637207, "avg_line_length": 14.5, "blob_id": "6761253676948192979e0f6481e44541dd8c990e", "content_id": "548df37e92a6951abf9c1684b3f81edaaee80df5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/0001_Hello_World.py", "repo_name": "Mohan-Palat/mohan-learn-python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\nprint 'Hello World' \r\n\r\nmyString = 'Hello World!'\r\n\r\nprint myString \r\nprint 'Say\\n%25s\\nAgain' % myString \r\n\r\n" }, { "alpha_fraction": 0.578163743019104, "alphanum_fraction": 0.5947063565254211, "avg_line_length": 39.620689392089844, "blob_id": "29c2d803e0fb83dd12cba76791abdd846d8c7b8e", "content_id": "aa05c5bf475f407f2569dcf7f3b87e5b7737d9ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1209, "license_type": "no_license", "max_line_length": 71, "num_lines": 29, "path": "/0014_MPI_DDL.py", "repo_name": "Mohan-Palat/mohan-learn-python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\nimport datetime\r\n\r\ni = 0\r\nfobo = open('/prustaff/x084978/Python/MPI/MPI_DML.SQL', 'w')\r\nfobo.write('\\n\\n-- IODS DML @ %s\\n\\n' % datetime.datetime.now())\r\nfor f in ['INSERT_IODSMANUAL.T_MASTER_DDA_NO_RANK_INVESTMENT.sql',\r\n 'INSERT_IODSMPI.T_REF_ASSET_CLASS_PEER_XREF.SQL',\r\n 'INSERT_IODSMPI.T_REF_LIPPERCATEGORY_PEER_XREF.SQL',\r\n 'INSERT_IODSMPI.T_REF_LIPPER_CATEGORY.SQL',\r\n 
'INSERT_IODSMPI.T_REF_MPI_HEADER_BENCHMARK_PERFORMANCE.SQL',\r\n 'INSERT_IODSMPI.T_REF_MPI_HEADER_INVESTMENT_PERFORMANCE.SQL',\r\n 'INSERT_IODSMPI.T_REF_MPI_STATISTICS_TYPES.SQL',\r\n 'INSERT_IODSMPI.T_REF_MPI_STUDY_TYPES.SQL',\r\n 'INSERT_IODSMPI.T_REF_MSTARCATEGORY_PEER_XREF.SQL',\r\n 'INSERT_IODSMPI.T_REF_PEER_TYPE_.SQL',\r\n 'INSERT_IODSMPI.T_REF_PEER_UNIVERSE.SQL'\r\n ]:\r\n fn = '/prustaff/x084978/Python/MPI/DML Scripts/' + f\r\n i += 1\r\n print 'Processing %02d: %s' % (i, fn)\r\n fobi = open(fn, 'r')\r\n fobo.write('\\n-- Begin %02d: %s --\\n\\n' % (i, fn))\r\n for eachLine in fobi:\r\n fobo.write(eachLine)\r\n fobi.close() \r\n fobo.write('\\n-- End %02d: %s --\\n\\n' % (i, fn))\r\nfobo.close()\r\n\r\n" }, { "alpha_fraction": 0.5961039066314697, "alphanum_fraction": 0.6246753334999084, "avg_line_length": 24.482759475708008, "blob_id": "d785c83e11f276ccc264612f64ed094e84d2d276", "content_id": "ee10cc6bcc7c56371c008df8c234cb63e1400e48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 770, "license_type": "no_license", "max_line_length": 103, "num_lines": 29, "path": "/0007_Lists_And_Slicing.py", "repo_name": "Mohan-Palat/mohan-learn-python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\naList = [ 0, 1, 'Two', 3, 'Four', 'V', 6]\r\neList = [ 'A', 'B', 'C' ]\r\n\r\nprint aList\r\n\r\nprint \"0th element = %s\" % aList[0]\r\nprint \"5th element = %s\" % aList[5]\r\nprint \"Last element = %s\" % aList[-1]\r\n\r\nprint \"2nd-4th = %s\" % aList[2:5]\r\nprint \"0th-Last but one = %s\" % aList[:-1]\r\n\r\naList[0] = 'Zero'\r\nprint aList\r\n\r\naList.insert(-1, 'Actual Six')\r\nprint aList\r\n\r\naList.extend(eList)\r\nprint aList\r\n\r\n# Lists are generic arrays\r\n# Syntax = [\r\n# 1.list.append(elem) -- adds a single element to the end of the list. ... \r\n# 2.list.insert(index, elem) -- inserts the element at the given index, shifting elements to the right.\r\n# 3.list.extend(list2) adds the elements in list2 to the end of the list.\r\n# https://developers.google.com/edu/python\r\n\r\n" }, { "alpha_fraction": 0.33324378728866577, "alphanum_fraction": 0.34788447618484497, "avg_line_length": 50.41549301147461, "blob_id": "a2da4b45f61bbab4501a4d26d571b51b1dc31008", "content_id": "757b35df37fc051fe0e65dd0061bae0537744a9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7445, "license_type": "no_license", "max_line_length": 93, "num_lines": 142, "path": "/0012_Load_Perf_Hist_No.py", "repo_name": "Mohan-Palat/mohan-learn-python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\n# +-----------------------------------------------------------------------------------------+\r\n# | U\\ 2018-03-12 |\r\n# | Load_Perf_No.py (No - Non Object Oriented Version - Closer to UNIX shell) Mohan Palat |\r\n# | |\r\n# | Purpose: |\r\n# | Markit provided 121 Performance text files, with header and trailer, zipped. |\r\n# | We need to a) Unzip b) strip header/trailer c) load each file into stage |\r\n# | Once all 121 files are loaded into the stage table, the regular stage to core DataStage |\r\n# | process is executed to load data from all 121 files into the core table |\r\n# | This Python program does the following |\r\n# | 1. Truncate the stage table |\r\n# | 2. 
Foreach Zip from the Cyberfusion Folder |\r\n# | Unzip |\r\n# | Remove header trailer (Collect the footer record count to reconcile at end) |\r\n# | Load into stage table |\r\n# | Remove the large intermediate files created |\r\n# | 3. Total up the footer record counts for reconciling with the loaded rec count |\r\n# | For every command executed, throws an exception if the command did not work. |\r\n# | |\r\n# | Parameter: |\r\n# | A number N which controls the number of files out of all the files located |\r\n# | Defaults to 1. If you pass a higher number, it does not try beyond the files found |\r\n# | |\r\n# | Log: |\r\n# | Writes to stdout, Use thusly to write into a log file |\r\n# | Load_Perf_No.py 121 | tee Load_Perf_2018-03-12-03-00-PM.log |\r\n# | |\r\n# | Modifications: |\r\n# | |\r\n# | Who Date Change |\r\n# | ----------- -------- ------------------------------------------------------------------ |\r\n# | Mohan Palat 20180312 Initial Revision |\r\n# +-----------------------------------------------------------------------------------------+\r\n\r\nimport os\r\nimport datetime\r\n\r\ni = 0\r\ntf = 0\r\nppbin = '/usr/local/dstage/INVODS/batch/PreProcHeaderTrailer '\r\ncybfldr = '/usr/local/dstage/INVODS/cyberf/MARKIT/'\r\nstgfldr = '/usr/local/dstage/INVODS/cyberf/stage/MARKIT/'\r\nwrkfldr = '/usr/local/dstage/INVODS/WorkFiles/'\r\ndsshell = 'sh /usr/local/dstage/INVODS/batch/INVODS_generic.sh '\r\n\r\nprint '\\n\\n%s\\n%s\\n%s\\n\\n' % ('=' * 26, datetime.datetime.now(), '=' * 26)\r\n\r\nl = os.listdir(cybfldr)\r\nl.sort();\r\nfor f in l:\r\n if f.endswith('.zip'):\r\n i += 1\r\n if i <= 121:\r\n #########################################\r\n # Init Log #\r\n #########################################\r\n tf = i\r\n print '=' * (len(f)+5)\r\n print '%03d. 
%s' % (i, f)\r\n print '=' * (len(f)+5)\r\n #########################################\r\n # Unzip #\r\n #########################################\r\n uzipfil = wrkfldr + f[:-4]\r\n print 'Unzipped file is', uzipfil\r\n oscmd = 'unzip -jo ' + cybfldr + f + ' -d ' + wrkfldr\r\n print oscmd\r\n result = os.system(oscmd)\r\n print 'Result for unzip = ', result\r\n if result != 0:\r\n print 'Unable to continue'\r\n break\r\n #########################################\r\n # Collect Tail Rowcount #\r\n #########################################\r\n if i == 1:\r\n oscmd = 'tail -v -n1 ' + uzipfil + ' > ' + wrkfldr + 'Load_Perf_Tail.log'\r\n else:\r\n oscmd = 'tail -v -n1 ' + uzipfil + ' >> ' + wrkfldr + 'Load_Perf_Tail.log'\r\n result = os.system(oscmd)\r\n print 'Result for Tail = ', result\r\n if result != 0:\r\n print 'Unable to continue'\r\n break\r\n oscmd = 'echo >> ' + wrkfldr + 'Load_Perf_Tail.log'\r\n result = os.system(oscmd)\r\n #########################################\r\n # Strip #\r\n #########################################\r\n stgfil = stgfldr + f[:-4]\r\n print 'Staged file is', stgfil\r\n oscmd = ppbin + ' -i ' + uzipfil + ' -o ' + stgfil + ' -s > /dev/null '\r\n result = os.system(oscmd)\r\n print 'Result for Strip = ', result\r\n if result != 0:\r\n print 'Unable to continue'\r\n break\r\n #########################################\r\n # Load Stage DS Job #\r\n #########################################\r\n # dspar1 = ' \"INVODS_010100_MARKIT_PERF_NONGIA_STG\" '\r\n # dspar1 = ' \"INVODS_010105_MARKIT_PERF_NONGIA_STG_NO_TRUNCATE\" '\r\n dspar1 = ' \"MO_INVODS_010105_MARKIT_PERF_NONGIA_STG_NO_TRUNCATE\" '\r\n dspar2 = '\"-param jpFileName='\r\n dspar2 += \"'\" + f[:-4] + \"'\" + '\"'\r\n oscmd = dsshell + dspar1 + dspar2\r\n result = os.system(oscmd)\r\n print 'Result for DataStage Load Stage = ', result\r\n # print oscmd\r\n if result != 0:\r\n print 'Unable to continue'\r\n break\r\n #########################################\r\n # Clean up #\r\n #########################################\r\n print \"Removing \", uzipfil\r\n os.remove(uzipfil)\r\n print \"Removing \", stgfil\r\n os.remove(stgfil)\r\n print '\\n\\n%s\\n%s\\n%s\\n\\n' % ('=' * 26, datetime.datetime.now(), '=' * 26)\r\n\r\nprint \"\\n============ Trailer Records Log =================\\n\" \r\noscmd = 'cat ' + wrkfldr + 'Load_Perf_Tail.log'\r\nresult = os.system(oscmd)\r\nprint \"\\n\"\r\n\r\ntry:\r\n fo = open(wrkfldr + 'Load_Perf_Tail.log', 'r')\r\n i = 0\r\n tp = 0\r\n for eachline in fo:\r\n i += 1\r\n if i % 2 == 0:\r\n tp += int(eachline[25:-1])\r\n fo.close()\r\n print 'Total Performance Records Received from Markit: %d in %d files' % (tp, tf) \r\nexcept IOError, e:\r\n print 'File Processing Error: ', e\r\n\r\nprint '\\n\\n%s\\n%s\\n%s\\n\\n' % ('=' * 26, datetime.datetime.now(), '=' * 26)\r\n\r\n" }, { "alpha_fraction": 0.5253807306289673, "alphanum_fraction": 0.5355330109596252, "avg_line_length": 23.225807189941406, "blob_id": "7827a62d455b1710ea0d017c59137fcbf28e71e8", "content_id": "a0fbe6b4e26379b7950e978021222a1541fc9a8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 788, "license_type": "no_license", "max_line_length": 72, "num_lines": 31, "path": "/0011_Classes_Objects.py", "repo_name": "Mohan-Palat/mohan-learn-python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\nimport sys\r\n\r\nclass HelloWorld(object):\r\n \"\"\" A Hello World Class \"\"\"\r\n\r\n stava = 25\r\n\r\n def __init__(self, name):\r\n \"\"\" Initialize with 
name for the object \"\"\"\r\n self.name = name\r\n print \"My Name is\", self.name\r\n print \"I belong to\" \r\n \r\n def set_age(self, age=0):\r\n \"\"\" Introduce and initialize age attribute \"\"\"\r\n self.age = age\r\n \r\n def print_attribs(self):\r\n \"\"\" Display Attributes \"\"\"\r\n names = self.name.split()\r\n print \"The name is %s. %s %s.\" % (names[1], names[0], names[1]) \r\n\r\nprint 'ARGC', len(sys.argv)\r\nif len(sys.argv) > 1: \r\n hw = HelloWorld(sys.argv[1])\r\nelse:\r\n hw = HelloWorld('Maltova Navarathnakurma')\r\nhw.print_attribs() \r\nprint 'Static Variable', hw.stava\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6243272423744202, "alphanum_fraction": 0.6426264643669128, "avg_line_length": 39.95454406738281, "blob_id": "094a000c5bb2e842639734675af606e2c64da08f", "content_id": "676abfc4e9b5e4cb550c7be2e1d0fb6095527380", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 929, "license_type": "no_license", "max_line_length": 110, "num_lines": 22, "path": "/0003_String_Slicing_SplitSolution.py", "repo_name": "Mohan-Palat/mohan-learn-python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\ns = \"this is a string, a\"\r\n\r\nprint '<'+s+'>'\r\nprint '<'+s[-2:]+'>'\r\nprint '<'+s[:-3]+'>'\r\nprint '<'+s[:-3]+s[-2:]+'>'\r\n\r\n# https://stackoverflow.com/questions/1010961/string-slicing-python\r\n# s = \"this is a string, a\"\r\n# where ','(comma) will always be the 3rd last character, aka s[-3].\r\n# I am thinking of ways to remove the ',' but can only think of \r\n# converting the string into a list, deleting it, and converting it back to a string. \r\n# This however seems a bit too much for simple task. \r\n# How can i accomplish this in a simpler way?\r\n\r\n# Normally, you would just do:\r\n# s = s[:-3] + s[-2:]\r\n# The s[:-3] gives you a string up to, but not including, the comma you want removed (\"this is a string\") and \r\n# the s[-2:] gives you another string starting one character beyond that comma (\" a\").\r\n# Then, joining the two strings together gives you what you were after (\"this is a string a\").\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6104146838188171, "alphanum_fraction": 0.6335583329200745, "avg_line_length": 47.380950927734375, "blob_id": "cdacf37f92db1935f14ee19826658b844c417246", "content_id": "475b4565cc4bd8efe856e62d80fda5d7a5ce10c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2074, "license_type": "no_license", "max_line_length": 119, "num_lines": 42, "path": "/0002_String_Slicing.py", "repo_name": "Mohan-Palat/mohan-learn-python", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\n\r\ns = 'Hello World!'\r\n\r\nprint 'String is %s' % s\r\n\r\nprint \"\"\"Printing s[-1]+s[-2]+s[-12]+'|'\"\"\"\r\nprint s[-1]+s[-2]+s[-12]+'|'\r\n\r\nprint 'Printing s[6:11] = World'\r\nprint s[6:11]\r\n\r\n# https://developers.google.com/edu/python/strings\r\n# String Slices\r\n# \r\n# The \"slice\" syntax is a handy way to refer to sub-parts of sequences -- typically strings and lists. \r\n# The slice s[start:end] is the elements beginning at start and extending up to but not including end. 
\r\n# Suppose we have s = \"Hello\"\r\n# [ H ][ e ][ l ][ l ][ o ]\r\n# [ 0 ][ 1 ][ 2 ][ 3 ][ 4 ]\r\n# [-5 ][-4 ][-3 ][-2 ][-1 ]\r\n# \r\n# the string 'hello' with letter indexes 0 1 2 3 4\r\n# - s[1:4] is 'ell' -- chars starting at index 1 and extending up to but not including index 4 \r\n# - s[1:] is 'ello' -- omitting either index defaults to the start or end of the string \r\n# - s[:] is 'Hello' -- omitting both always gives us a copy of the whole thing \r\n# (this is the pythonic way to copy a sequence like a string or list) \r\n# - s[1:100] is 'ello' -- an index that is too big is truncated down to the string length \r\n# \r\n# The standard zero-based index numbers give easy access to chars near the start of the string. \r\n# As an alternative, Python uses negative numbers to give easy access to the chars at the end of the string: \r\n# s[-1] is the last char 'o', s[-2] is 'l' the next-to-last char, and so on. \r\n# Negative index numbers count back from the end of the string:\r\n# - s[-1] is 'o' -- last char (1st from the end) \r\n# - s[-4] is 'e' -- 4th from the end \r\n# - s[:-3] is 'He' -- going up to but not including the last 3 chars. \r\n# - s[-3:] is 'llo' -- starting with the 3rd char from the end and extending to the end of the string. \r\n# \r\n# It is a neat truism of slices that for any index n, s[:n] + s[n:] == s. \r\n# This works even for n negative or out of bounds. \r\n# Or put another way s[:n] and s[n:] always partition the string into two string parts, conserving all the characters. \r\n# As we'll see in the list section later, slices work with lists too.\r\n" } ]
15
jasmultani5391/Contraceptive-Method-Use
https://github.com/jasmultani5391/Contraceptive-Method-Use
c2d5a04024e1701784908806905c649268e48a11
b235d7b4f0d893905da8f8b7be93f284440cdb92
a1d0d8e5a1e92e21e9d6c642a563071cd2b04b72
refs/heads/master
2020-08-31T05:22:23.068725
2020-03-25T21:55:13
2020-03-25T21:55:13
218,603,202
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6958214640617371, "alphanum_fraction": 0.7124318480491638, "avg_line_length": 30.5710391998291, "blob_id": "87213d3906bb6f7d34eaa3bdb9af1e01ecb7ea0b", "content_id": "588f252fafb837a6faf7bd3b42f46cc34d60d447", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11559, "license_type": "no_license", "max_line_length": 173, "num_lines": 366, "path": "/contraceptive use - indonesia - uci machine learning repository.py", "repo_name": "jasmultani5391/Contraceptive-Method-Use", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[141]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import tree\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import linear_model\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import precision_score, recall_score, f1_score\n\n\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nfrom numpy import array\nfrom numpy import argmax\n\ndata = pd.read_csv('C:\\\\Users\\Jasmine\\Downloads\\cmc.data', sep=',')\ndata.columns = [\n 'wife_age', \n 'wife_edu', \n 'husband_edu', \n '#kids', \n 'wife_religion', \n 'wife_working?', \n 'husb_job', \n 'standard_of_living', \n 'media_exposure', \n 'bcontrol']\nprint(data.head(10))\n\n\n### info on the features, from UCI machine learning repository\n### \n# 1. Wife's age (numerical)\n# 2. Wife's education (categorical) 1=low, 2, 3, 4=high\n# 3. Husband's education (categorical) 1=low, 2, 3, 4=high\n# 4. Number of children ever born (numerical)\n# 5. Wife's religion (binary) 0=Non-Islam, 1=Islam\n# 6. Wife's now working? (binary) 0=Yes, 1=No\n# 7. Husband's occupation (categorical) 1, 2, 3, 4\n# 8. Standard-of-living index (categorical) 1=low, 2, 3, 4=high\n# 9. Media exposure (binary) 0=Good, 1=Not good\n# 10. 
Contraceptive method used (class attribute) 1=No-use, 2=Long-term, 3=Short-term\n\n\n\n\n\nsns.countplot(y = 'wife_age', hue = 'bcontrol', data = data)\n\n\n#higher the number ~ higher the wife's education\n#volume of birth control increases by education but doesn't surpass \"not using bcontrol\" until the highest education\n#sns.countplot(y = 'wife_edu', hue = 'bcontrol', data = data)\n\n#wowowow, not using bc exceeds each category\n#sns.countplot(y = 'husband_edu', hue = 'bcontrol', data = data)\n\n#general trend of trying to start a family, 0 = most likely to not have bcontrol\n#sns.countplot(y = '#kids', hue = 'bcontrol', data = data)\n\n#wife's religion 0 = nonIslam, 1 = Islam\n#major change---all choices have equal weight under non-religious category whereas Islam has some preferences\n#sns.countplot(y = 'wife_religion', hue = 'bcontrol', data = data)\n\n#wife working = 1, wife not working = 0\n#volume increases from \"wife working\" to \"not working\", but the general choice to stay without birth control wins in each\n#sns.countplot(y = 'wife_working?', hue = 'bcontrol', data = data)\n\n#sns.countplot(y = 'husb_job', hue = 'bcontrol', data = data)\n\n#volume to use bc increases with higher standard of living, but choice to do use bc wins\n#sns.countplot(y = 'standard_of_living', hue = 'bcontrol', data = data)\n\n# 0 = good media coverage, 1 = not good\n#sns.countplot(y = 'media_exposure', hue = 'bcontrol', data = data)\n\n\nsns.lmplot(x='wife_age', y='#kids', data=data,\n fit_reg=False, # No regression line\n hue= 'bcontrol') # Color by birth control\n\n#above shows a really nice trend\n#those who aren't on birth control, are often the ones with 0 or 1 kid (probably to start a family)\n#I notice that there's another clump of women (around age 40, with 10+ kids) who also don't have use birth control\n#data.describe()\n\nsns.lmplot(x='wife_age', y='wife_edu', data=data,\n fit_reg=False, # No regression line\n hue= 'bcontrol') # Color by birth control\n\n\n\n# In[144]:\n\n\n#prepping the data so that numbers are normalized; normalize data that isn't already in a binary code (wife_age, wife_edu, husband_edu< #kids, husb_job, standard_of_living)\n\n\nclass Cleanup():\n def normdata(self, datapoint):\n minimum = min(datapoint)\n maximum = max(datapoint)\n normalized = []\n for i in datapoint:\n normalized += [(i-minimum)/(maximum-minimum)]\n return normalized\n \n def countinglabels(self, my_list):\n freq = {}\n for i in my_list:\n if (i in freq):\n freq[i] += 1\n else:\n freq[i] = 1\n return freq\n \n def label(self, data):\n lbl = []\n for i in data:\n if i == 1:\n lbl.append(0)\n else:\n lbl.append(1)\n return lbl\n \n\nwifeage = Cleanup()\ndata['wife_age_norm'] = wifeage.normdata(data['wife_age'])\n\nwifeedu = Cleanup()\ndata['wife_edu_norm'] = wifeedu.normdata(data['wife_edu'])\n\nhusbedu = Cleanup()\ndata['husband_edu_norm'] = husbedu.normdata(data['husband_edu'])\n\nhusbjob = Cleanup()\ndata['husb_job_norm'] = husbjob.normdata(data['husb_job'])\n\nnumkids = Cleanup()\ndata['#kids_norm'] = husbjob.normdata(data['#kids'])\n\nstandardofliving = Cleanup()\ndata['#standard_of_living_norm'] = husbjob.normdata(data['standard_of_living'])\n\n\n# also want to simplify the target some more; let's make 0 = no birth control at all (previously 1 in raw data),\n# and 1 = some or highest form of data (previously 2 and 3 in raw data, respectively)\n\nlist_bcontrol = list(data['bcontrol'])\nbinary_bc = Cleanup()\nbinary_bcontrol = binary_bc.label(list_bcontrol)\n\n\n#below helps us double check 
that the lengh of the raw data's birth control column is equal to the binary birth control label we created\nlistbcontrol = Cleanup()\nlistbcontrol = listbcontrol.countinglabels(list_bcontrol)\nprint(listbcontrol)\n\nbinbcontrol = Cleanup()\nbinbcontrol = binbcontrol.countinglabels(binary_bcontrol)\nprint(binbcontrol)\n\n#print(333+511), this equals 844\n\n\n# In[153]:\n\n\n#Feature engineering: let's decide which features to include for our future machine learning algorithm\n\n\nfeatures = pd.DataFrame(\n {'wifeage_norm': data['wife_age_norm'],\n 'wifeedu_norm': data['wife_edu_norm'],\n 'husbandedu_norm': data['husband_edu_norm'],\n 'kids_norm': data['#kids_norm'],\n 'wife_religion' : data['wife_religion'],\n 'wife_working?' : data['wife_working?'],\n 'husbjob_norm' : data['husb_job_norm'],\n 'standardofliving_norm': data['#standard_of_living_norm'],\n 'media_exposure': data['media_exposure']\n })\n\nlabel = pd.DataFrame({'label' : binary_bcontrol})\nmultilabel = pd.DataFrame({'label' : data['bcontrol']})\ndata['binary label'] = binary_bcontrol\n\n\nfeat_label = [features, label]\n\n\n\nprint(data.head(5))\n\n\n# In[146]:\n\n\n#logistic regression algorithm\n\ntraindata, testdata, trainlbl, testlbl = train_test_split(features,label,test_size = 0.2, random_state = 3)\n\nscaler = StandardScaler()\ntraindata = scaler.fit_transform(traindata)\ntestdata = scaler.transform(testdata)\n\nmodel = LogisticRegression()\nmodel.fit(traindata, np.ravel(trainlbl))\n\n\n# Score the model on the train data\ntrainmodel = model.score(traindata, trainlbl) #should parameters be the same as .fit()\nprint('train score is ' + str(round(trainmodel*100,2)) + ' %')\n# Score the model on the test data\ntestmodel = model.score(testdata, testlbl)\nprint('test score is ' + str(round(testmodel*100)) + ' %')\n\n\n# In[148]:\n\n\n#decision forest\n\ntraindataF, testdataF, trainlblF, testlblF = train_test_split(features, label, random_state = 3) #random-state is sqrt of number of total features from raw data\n\n\nforest = RandomForestClassifier(random_state=3)\nforest.fit(traindataF, np.ravel(trainlblF))\nprint('Accuracy of Forest classifier is ' + str(forest.score(testdataF, testlblF)*100) + ' %')\nweights_features = forest.feature_importances_\ncolumns = features.head(0)\nvariable_weights = list(zip(columns, weights_features))\nprint('\\t')\nprint(variable_weights)\nbiggest_index = max(weights_features)\nprint('\\t')\nprint(biggest_index) #this states that wife_age_norm and kids_norm are the leading two variables that influence correlation with label\n\n\n# In[154]:\n\n\n#evaluating features\n\n# accuracy was previously 69.8369% for our Forest Classifier...which means\n# it was about 69.83 % correct ((true positive+true negative)/total)\n\n\n\n\n#using info from Forest classifier\nscores = []\n\npredictlabel= forest.predict(testdataF)\nprecisionForest = precision_score(testlblF, predictlabel, average = 'binary')\nrecallForest = recall_score(testlblF, predictlabel, average='binary')\nF1Forest = f1_score(testlblF, predictlabel, average='binary')\n\nscores.append(precisionForest*100)\nscores.append(recallForest*100)\nscores.append(F1Forest*100)\n\nscorenames = ['precision %', 'recall %', 'F1 %']\nForestScores = list(zip(scorenames, scores))\n\nprint(ForestScores)\n\n\nsns.lmplot(x='wife_age', y='#kids', data=data,\n fit_reg=False, # No regression line\n hue= 'binary label') # Color by birth control\n\n\n\n# when our model predicts positive, it was 73.54 % correct\n# precision helps when cost of false positive is high (the 
woman is reported to be taking birth control, but actually isn't)\n# recall helps when the cost of a false negative is high (the woman is stated to not use birth control, but actually is using it)\n# F1 score is an overall measure of model's accuracy (combines precision and recall)...(2* (precision * recall)/(precision+recall)); closer to 1 the better\n\n\n# In[150]:\n\n\n## instead of using labels as a binary (uses birth control or not), will set up the decision forest classification to adjust for multiclass labels\n\ntraindataFII, testdataFII, trainlblFII, testlblFII = train_test_split(features, multilabel, random_state = 3) #random-state is sqrt of number of total features from raw data\n\n\nforestII = RandomForestClassifier(random_state=3)\nforestII.fit(traindataFII, np.ravel(trainlblFII))\nprint('Accuracy of Forest classifier is ' + str(forestII.score(testdataFII, testlblFII)*100) + ' %')\nweights_featuresII = forestII.feature_importances_\ncolumns = features.head(0)\nvariable_weightsII = list(zip(columns, weights_featuresII))\nprint(\"\\t\")\nprint(variable_weightsII)\nbiggest_indexII = max(weights_featuresII)\n#print(biggest_indexII)\n\n#performance drops terribly...maybe I need to use a different algorithm to address multi-classification?\n\n\nscoresII = []\n\npredictlabelII= forestII.predict(testdataFII)\nprecisionForestII = precision_score(testlblFII, predictlabelII, average = 'weighted')\nrecallForestII = recall_score(testlblFII, predictlabelII, average='weighted')\nF1ForestII = f1_score(testlblFII, predictlabelII, average='weighted')\n\nscoresII.append(precisionForestII*100)\nscoresII.append(recallForestII*100)\nscoresII.append(F1ForestII*100)\n\nscorenames = ['precision %', 'recall %', 'F1 %']\nForestScoresII = list(zip(scorenames, scoresII))\n\nprint(\"\\t\")\nprint('precision %: ' + str(precisionForestII*100), 'recall %: ' + str(recallForestII*100), 'F1 %: ' + str(F1ForestII*100))\n#print(ForestScoresII)\n\n\n# In[151]:\n\n\n#will try to use K-nearest neighbor for multi-class use\n# ....this is even worse\n\ntraindataFII, testdataFII, trainlblFII, testlblFII = train_test_split(features, multilabel, random_state = 3) #random-state is sqrt of number of total features from raw data\n\nKNN = KNeighborsClassifier(n_neighbors = 48)\nKNNfit = KNN.fit(traindataFII, np.ravel(trainlblFII))\nKNNAcc = KNN.score(testdataFII, testlblFII)\n\nscores = []\n\npredictlabelKNN= KNN.predict(testdataFII)\nprecisionKNN = precision_score(testlblFII, predictlabelKNN, average='weighted')\nrecallKNN = recall_score(testlblFII, predictlabelKNN, average='weighted')\nF1KNN = f1_score(testlblFII, predictlabelKNN, average='weighted')\n\nscores.append(precisionKNN*100)\nscores.append(recallKNN*100)\nscores.append(F1KNN*100)\n\nscorenames = ['precision', 'recall', 'F1']\nKNNscores = list(zip(scorenames, scores))\nprint(\"Accuracy is: \" + str(KNNAcc*100))\nprint(\"Scores for KNN are: \" + str(KNNscores))\n\n\n# In[ ]:\n\n\n\n\n" } ]
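A quick aside on the formulas used in the script above: the min-max scaling in Cleanup.normdata and the F1 relation quoted in the comments are easy to sanity-check in isolation. Below is a minimal sketch assuming nothing beyond those two formulas; the numbers are toy values, not the survey data.

    # Hedged sketch: verify min-max scaling and F1 = 2PR/(P+R) on toy values.
    def min_max(xs):
        lo, hi = min(xs), max(xs)
        return [(x - lo) / (hi - lo) for x in xs]

    assert min_max([16, 26, 36]) == [0.0, 0.5, 1.0]    # endpoints map to 0 and 1

    precision, recall = 0.7354, 0.60                   # toy scores, not model output
    f1 = 2 * precision * recall / (precision + recall)
    assert abs(f1 - 0.661) < 1e-3                      # harmonic mean of the two scores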
1
vdesgrange/agent_based_modelling
https://github.com/vdesgrange/agent_based_modelling
e132b9a732ed7173cb36718e645eadb73363aba4
6ed333f38c214ab59f1c712320ef544c330ce5c6
53248fcf884be8f137f0da2e464a14fdf9f5c5d4
refs/heads/master
2023-02-24T17:51:52.253448
2021-02-03T20:39:49
2021-02-03T20:39:49
326,690,433
0
1
null
null
null
null
null
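The files listed below implement an Epstein-style civil violence model in Mesa. The core activation rule they encode is grievance G = H(1 - L), arrest probability P = 1 - exp(-k * floor(C_v / (A_v + 1))), net risk N = R * P, and the rule "go active when G - N > T". A minimal sketch of that rule, independent of the framework; k = 2.3 matches the model's default below, while the other values are hypothetical:

    import math

    # Hedged sketch of the Epstein-style activation rule used by the agents below.
    def is_active(hardship, legitimacy, risk_aversion, threshold, cops, actives, k=2.3):
        grievance = hardship * (1 - legitimacy)                    # G = H(1 - L)
        arrest_p = 1 - math.exp(-k * int(cops / (actives + 1)))    # P = 1 - exp(-k * floor(Cv/(Av+1)))
        net_risk = risk_aversion * arrest_p                        # N = R * P
        return grievance - net_risk > threshold                    # rule A

    # With no cop in sight, floor(Cv/(Av+1)) = 0, so P = 0 and a strongly
    # aggrieved agent turns active:
    print(is_active(0.9, 0.2, 0.5, 0.1, cops=0, actives=3))        # True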
[ { "alpha_fraction": 0.6026018261909485, "alphanum_fraction": 0.6193278431892395, "avg_line_length": 48.267173767089844, "blob_id": "c7a1f8ead1f61b47023f44da1efa015ea85cf314", "content_id": "9179c24caaa94c146ef9957cbcce232ffa2a42aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6457, "license_type": "no_license", "max_line_length": 154, "num_lines": 131, "path": "/civil_violence/server.py", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "import random\n\nfrom mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.UserParam import UserSettableParameter\nfrom mesa.batchrunner import BatchRunner\nfrom mesa.visualization.modules import CanvasGrid, ChartModule, PieChartModule\n\nfrom civil_violence_model import CivilViolenceModel\nfrom constant_variables import GraphType, Color\nfrom graph_utils import NetworkModuleExtended # For NetworkGrid visualization\nfrom graphics_portrayal import get_agent_portrayal, get_network_portrayal, get_grievance_portrayal\nfrom utils import read_configuration\n\n\ndef get_user_model_parameters():\n \"\"\"\n Get parameters of the agent-based model\n Default parameters based on Run 5 of Epstein type 1 civil violence model.\n :return: A dictionary of model parameters\n \"\"\"\n return {\n \"agent_density\": UserSettableParameter(\"slider\", \"Agent Density\", .7, 0., 1., step=.001,\n description=\"Initial percentage of citizen in population\"),\n \"active_agent_density\": UserSettableParameter(\"slider\", \"Active agent Density\", .1, 0., 1., step=.001,\n description=\"Initial percentage of active citizen in population\"),\n \"cop_density\": UserSettableParameter(\"slider\", \"Cop Density\", .04, 0, 1, step=.001,\n description=\"Initial percentage of cops in population\"),\n \"agent_vision\": UserSettableParameter(\"slider\", \"Agent Vision\", 7, 0, 10,\n description=\"Number of patches visible to citizens\"),\n \"cop_vision\": UserSettableParameter(\"slider\", \"Cop Vision\", 7, 0, 10,\n description=\"Number of patches visible to cops\"),\n \"initial_legitimacy_l0\": UserSettableParameter(\"slider\", \"Initial Central authority legitimacy\", .8, 0, 1,\n step=.01,\n description=\"Global parameter: Central authority legitimacy\"),\n \"active_threshold_t\": UserSettableParameter(\"slider\", \"Active Threshold\", .1, 0, 1, step=.01,\n description=\"Threshold that agent's Grievance must exceed Net Risk to go active\"),\n \"max_jail_term\": UserSettableParameter(\"slider\", \"Max Jail Term\", 1000, 0, 1000,\n description=\"Maximum number of steps that jailed citizens stay in\"),\n \"inf_threshold\": UserSettableParameter(\"slider\", \"Influencer threshold\", 150, 0, 150, \n description=\"Amount of nodes that need to be connected to consider agents influencers.\"),\n \"graph_type\": UserSettableParameter(\"choice\", \"GraphType\", value=GraphType.BARABASI_ALBERT.name,\n choices=[\"NONE\", GraphType.ERDOS_RENYI.name, GraphType.BARABASI_ALBERT.name, GraphType.WATTS_STROGATZ.name])\n }\n\n\ndef get_visualization_elements(model_paramsl, show_network=False):\n\n # 2D cellular automata representing real-world environment\n canvas_element = CanvasGrid(\n get_agent_portrayal,\n grid_width=model_paramsl['width'], grid_height=model_paramsl['height'],\n canvas_width=500, canvas_height=500)\n\n grievance_element = CanvasGrid(\n get_grievance_portrayal,\n grid_width=model_paramsl['width'], grid_height=model_paramsl['height'],\n canvas_width=500, 
canvas_height=500)\n\n\n # Graph representing agent's social network\n network_element = NetworkModuleExtended(get_network_portrayal, canvas_width=500, canvas_height=500, library='sigma')\n\n # Chart for amount of agents during the run\n agents_state_chart = ChartModule([{\"Label\": \"QUIESCENT\", \"Color\": Color['QUIESCENT'].value},\n {\"Label\": \"ACTIVE\", \"Color\": Color['ACTIVE'].value},\n {\"Label\": \"JAILED\", \"Color\": Color['JAILED'].value}], 100, 270)\n\n grievance_chart = ChartModule([{\"Label\": \"LEGITIMACY\", \"Color\": Color['QUIESCENT'].value},\n # {\"Label\": \"Hardship\", \"Color\": Color['ACTIVE'].value}\n ], 50, 135)\n\n # outbreak_chart = ChartModule([{\"Label\": \"OUTBREAKS\", \"Color\": Color['QUIESCENT'].value},\n # # {\"Label\": \"Hardship\", \"Color\": Color['ACTIVE'].value}\n # ], 50, 135)\n\n pie_chart = PieChartModule([{\"Label\": \"QUIESCENT\", \"Color\": Color['QUIESCENT'].value},\n {\"Label\": \"ACTIVE\", \"Color\": Color['ACTIVE'].value},\n {\"Label\": \"JAILED\", \"Color\": Color['JAILED'].value}], 200, 500)\n\n elements = [canvas_element, grievance_element, agents_state_chart, pie_chart, grievance_chart]\n if show_network:\n elements.insert(1, network_element)\n\n return elements\n\n\ndef run(configuration, seed=None):\n \"\"\"\n Run the mesa server\n :param configuration: configuration used by the model.\n :param seed: random seed. By default None.\n \"\"\"\n \"\"\"\n to get results for multiple iterations, run underlying code and add this to default.json in configurations: \n \"inf_threshold\": 50,\n \"active_threshold_t\": 0.5,\n \"graph_type\": \"GraphType.ERDOS_RENYI.name\" \n \"\"\"\n\n #batch_run = BatchRunner(CivilViolenceModel, fixed_parameters = config, iterations = 10)\n #batch_run.run_all()\n #data = batch_run.get_model_vars_dataframe()\n #data.head()\n\n\n random.seed(seed)\n\n model_params = get_user_model_parameters()\n\n # By updating model_params with configuration after getting the user settable parameters,\n # it let us provide fixed values for our model which won't be overwritten by the user choice.\n # Just remove attribute from the configuration file to get user interface back.\n model_params.update(configuration) # Overwritten user parameters don't appear in the graphic interface\n model_params.update({'seed': seed})\n\n\n server = ModularServer(\n CivilViolenceModel,\n get_visualization_elements(model_params, show_network=False),\n name=\"Civil violence with network model\",\n model_params=model_params\n )\n print(model_params)\n\n server.port = 8521\n server.launch()\n\n\nif __name__ == '__main__':\n config = read_configuration()\n run(config, None)\n\n\n\n" }, { "alpha_fraction": 0.5600146055221558, "alphanum_fraction": 0.5789857506752014, "avg_line_length": 31.64285659790039, "blob_id": "24c84fbec312d506e2318a94c0b53b31c76f6fe8", "content_id": "8b31c0fb367aea38a4ba2ca5d82522271e84dc21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2741, "license_type": "no_license", "max_line_length": 118, "num_lines": 84, "path": "/civil_violence/sobol_plot.py", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "from SALib.analyze import sobol\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import combinations\n\n# from Sobol import problem\n\ndef plot_index(s, params, i, title=''):\n \"\"\"\n Creates a plot for Sobol sensitivity analysis that shows the contributions\n of each parameter to the global 
sensitivity.\n\n Args:\n s (dict): dictionary {'S#': dict, 'S#_conf': dict} of dicts that hold\n the values for a set of parameters\n params (list): the parameters taken from s\n i (str): string that indicates what order the sensitivity is.\n title (str): title for the plot\n \"\"\"\n\n if i == '2':\n p = len(params)\n params = list(combinations(params, 2))\n indices = s['S' + i].reshape((p ** 2))\n indices = indices[~np.isnan(indices)]\n errors = s['S' + i + '_conf'].reshape((p ** 2))\n errors = errors[~np.isnan(errors)]\n else:\n indices = s['S' + i]\n errors = s['S' + i + '_conf']\n plt.figure()\n\n l = len(indices)\n\n plt.title(title)\n plt.ylim([-0.2, len(indices) - 1 + 0.2])\n plt.yticks(range(l), params)\n plt.errorbar(indices, range(l), xerr=errors, linestyle='None', marker='o')\n plt.axvline(0, c='k')\n\n\ndef sobol_plot_main():\n problem = {\n 'num_vars': 3,\n 'names': ['active_threshold_t', 'initial_legitimacy_l0', 'max_jail_term'],\n 'bounds': [[0.01, 1], [0.01, 1], [1, 100]]\n }\n\n file_path = [\n # './archives/saved_data_sobol_1611799908.npy',\n './archives/saved_data_sobol_no_network_1611861983.npy',\n ]\n\n for path in file_path:\n with open(path, 'rb') as f:\n data = np.load(f, allow_pickle=True)[()]\n\n data = pd.DataFrame(data, columns = ['active_threshold_t', 'initial_legitimacy_l0', 'max_jail_term', 'Run',\n 'QUIESCENT', 'ACTIVE', 'JAILED', 'OUTBREAKS', 'LEGITIMACY'])\n Y = data['OUTBREAKS'].values\n\n # loaded_data = pd.DataFrame(data, columns=['active_threshold_t', 'initial_legitimacy_l0', 'max_jail_term', 'Run',\n # 'QUIESCENT', 'ACTIVE', 'JAILED', 'OUTBREAKS', 'LEGITIMACY'])\n # loaded_data = loaded_data.drop(loaded_data.index[2992:])\n # Y = loaded_data['OUTBREAKS'].values\n\n Si_outbreaks = sobol.analyze(problem, Y, print_to_console=False)\n Si = Si_outbreaks\n\n plot_index(Si, problem['names'], '1', 'First order sensitivity')\n plt.show()\n\n # Second order\n plot_index(Si, problem['names'], '2', 'Second order sensitivity')\n plt.show()\n\n # Total order\n plot_index(Si, problem['names'], 'T', 'Total order sensitivity')\n plt.show()\n\n\nif __name__ == '__main__':\n sobol_plot_main()" }, { "alpha_fraction": 0.5794233083724976, "alphanum_fraction": 0.5861074924468994, "avg_line_length": 40.58038330078125, "blob_id": "041a380427a1d9cac0692c7f2273289830be1aa6", "content_id": "90c6ed8f8765807f0d5c4a4ba0a74b2e957bcd30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15260, "license_type": "no_license", "max_line_length": 120, "num_lines": 367, "path": "/civil_violence/civil_violence_model.py", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "import json\nimport random\nfrom datetime import datetime\nfrom mesa import Model\nfrom mesa.space import MultiGrid\nfrom mesa.time import RandomActivation\nfrom mesa.datacollection import DataCollector\nfrom civil_violence_agents import Citizen, Cop\nfrom constant_variables import State, GraphType\nfrom graph_utils import generate_network, print_network\nfrom figure import create_fig, run_analysis\nfrom utils import *\n\n\nclass CivilViolenceModel(Model):\n \"\"\" Civil violence model class \"\"\"\n def __init__(self,\n max_iter=200,\n height=40, width=40,\n agent_density=0.7, agent_vision=7,\n active_agent_density=0.01,\n cop_density=0.04, cop_vision=7,\n inf_threshold=40, tackle_inf=False,\n k=2.3, graph_type=GraphType.BARABASI_ALBERT.name,\n p=0.1, p_ws=0.1,\n directed=False, max_jail_term=30,\n active_threshold_t=0.1, 
initial_legitimacy_l0=0.82,\n                 movement=True, seed=None):\n        \"\"\"\n        Create a new civil violence model.\n\n        :param max_iter: Maximum number of steps in the simulation.\n        :param height: Grid height.\n        :param width: Grid width.\n        :param agent_density: Approximate percentage of cells occupied by citizen agents.\n        :param agent_vision: Radius of the agent vision in every direction.\n        :param active_agent_density: Enforce initial percentage of cells occupied by active agents.\n        :param cop_density: Approximate percentage of cells occupied by cops.\n        :param cop_vision: Radius of the cop vision in every direction.\n        :param initial_legitimacy_l0: Initial legitimacy of the central authority.\n        :param inf_threshold: Amount of nodes that need to be connected before an agent is considered an influencer.\n        :param tackle_inf: Remove influencers when an outbreak starts\n        :param max_jail_term: Maximal jail term.\n        :param active_threshold_t: Threshold beyond which a citizen agent becomes active.\n        :param k: Arrest term constant k.\n        :param graph_type: Graph used to build network\n        :param p: Probability for edge creation\n        :param directed: Is graph directed\n        :param movement: Can agent move at end of an iteration\n        :param seed: random seed\n\n        Additional attributes:\n            running : is the model running\n            iteration : current step of the simulation\n            citizen_list : a list storing the citizen agents added to the model. \n            influencer_list : a list storing the citizen agents that are influencers\n\n            grid : A 2D cellular automaton representing the real world space environment\n            network : A NetworkGrid with as many nodes as (citizen) agents representing the social network.\n            Agents in the NetworkGrid are deep copies of agents in the MultiGrid, as Mesa's implementation is based on\n            the usage of a single space. 
(Example: NetworkGrid place_agent method will change \"pos\" attribute from agent\n meaning one agent can't be on both MultiGrid and NetworkGrid).\n We maintain a dictionary of agent position instead.\n\n \"\"\"\n super().__init__()\n\n # =============================\n # === Initialize attributes ===\n # =============================\n\n self.seed = seed\n self.random.seed(self.seed)\n\n # Initialize Model grid and schedule\n self.height = height\n self.width = width\n self.grid = MultiGrid(self.width, self.height, torus=True) # Grid or MultiGrid ?\n self.schedule = RandomActivation(self)\n self.max_iter = max_iter\n self.iteration = 0 # Simulation iteration counter\n self.movement = movement\n\n # Set Model main attributes\n self.max_jail_term = max_jail_term\n self.active_threshold_t = active_threshold_t\n self.initial_legitimacy_l0 = initial_legitimacy_l0\n self.legitimacy = initial_legitimacy_l0\n self.k = k\n self.graph_type = graph_type\n\n self.agent_density = agent_density\n self.agent_vision = agent_vision\n self.active_agent_density = active_agent_density\n self.cop_density = cop_density\n self.cop_vision = cop_vision\n self.inf_threshold = inf_threshold\n\n self.citizen_list = []\n self.cop_list = []\n self.influencer_list = []\n self.jailings_list = [0, 0, 0, 0]\n self.outbreaks = 0\n self.outbreak_now = 0\n self.outbreak_influencer_now = False\n self.tackle_inf = tackle_inf\n\n date = datetime.now()\n self.path = f'output/{self.graph_type}_{date.month}_{date.day}_{date.hour}_{date.minute}_'\n\n # === Set Data collection ===\n self.datacollector = DataCollector(\n model_reporters=self.get_model_reporters(),\n agent_reporters=self.get_agent_reporters()\n )\n\n # ==============================\n # === Initialize environment ===\n # ==============================\n\n # Add agents to the model\n unique_id = 0\n for (contents, x, y) in self.grid.coord_iter():\n random_x = self.random.random()\n if random_x < self.agent_density:\n # Add agents\n agent = Citizen(\n unique_id=unique_id, model=self,\n pos=(x, y), hardship=self.random.random(), susceptibility=self.random.random(),\n influence=self.random.random(), expression_intensity=self.random.random(),\n legitimacy=self.initial_legitimacy_l0, risk_aversion=self.random.random(),\n threshold=self.active_threshold_t, vision=self.agent_vision)\n\n unique_id += 1\n self.citizen_list.append(agent)\n self.grid.place_agent(agent, (x, y)) # Place agent in the MultiGrid\n self.schedule.add(agent)\n\n elif random_x < (self.agent_density + self.active_agent_density):\n # Enforce an initial proportion of active agents\n agent = Citizen(\n unique_id=unique_id, model=self,\n pos=(x, y), hardship=self.random.random(), susceptibility=self.random.random(),\n influence=self.random.random(), expression_intensity=self.random.random(),\n legitimacy=self.initial_legitimacy_l0, risk_aversion=self.random.random(),\n threshold=0, vision=self.agent_vision)\n\n unique_id += 1\n self.citizen_list.append(agent)\n self.grid.place_agent(agent, (x, y)) # Place agent in the MultiGrid\n self.schedule.add(agent)\n\n elif random_x < (self.agent_density + self.active_agent_density + self.cop_density):\n # Add law enforcement officer\n agent = Cop(\n unique_id=unique_id, model=self,\n pos=(x, y), vision=self.cop_vision)\n\n unique_id += 1\n self.cop_list.append(agent)\n self.grid.place_agent(agent, (x, y)) # Place agent in the MultiGrid\n self.schedule.add(agent)\n\n # Generate a social network composed of every civilian agents\n self.G, self.network_dict = 
generate_network(self.citizen_list, graph_type, p, p_ws, directed, seed)\n        # print_network(self.G, self.network_dict)  # Uncomment to print the network.\n\n        # With network in place, set the influencers.\n        self.set_influencers(self.inf_threshold)\n\n        # Create the graph showing the frequency of degrees for the nodes\n        create_fig(self.G.degree, draw=False)  # Set draw=True to draw a figure\n\n        self.running = True\n        self.datacollector.collect(self)\n\n    def step(self):\n        \"\"\"\n        One step in agent-based model simulation\n        \"\"\"\n\n        self.schedule.step()\n        self.iteration += 1\n        self.update_legitimacy()\n\n        self.outbreak_score_monitoring()\n        self.datacollector.collect(self)\n\n        # Save initial values\n        if self.iteration == 1:\n            self.save_initial_values(save=False)\n\n        # Stop the model after a certain amount of iterations.\n        if self.iteration > self.max_iter:\n            self.save_data(save=False)\n            self.running = False\n\n    def outbreak_score_monitoring(self):\n        if self.tackle_inf:\n            if self.count_type_citizens(\"ACTIVE\") > 30 and not self.outbreak_influencer_now:\n                self.jail_influencer()\n                self.outbreak_influencer_now = True\n\n            if self.count_type_citizens(\"ACTIVE\") < 30:\n                self.outbreak_influencer_now = False\n\n        # Count amount of outbreaks\n        if self.count_type_citizens(\"ACTIVE\") > 50 and self.outbreak_now == 0:\n            self.outbreaks += 1  # Total number of outbreak\n            self.outbreak_now = 1  # Indicate if outbreak now\n\n        if self.count_type_citizens(\"ACTIVE\") < 50:\n            self.outbreak_now = 0\n\n    def save_data(self, save=True):\n\n        if save is not False:\n            df_end = self.datacollector.get_agent_vars_dataframe()\n            name = self.path + 'run_values.csv'\n            df_end.to_csv(name)\n        else:\n            pass\n\n    def save_initial_values(self, save=False):\n        \n        if save is not False:\n            dictionary_data = {\n                'agent_density': self.agent_density,\n                'agent_vision': self.agent_vision,\n                'active_agent_density': self.active_agent_density,\n                'cop_density': self.cop_density,\n                'initial_legitimacy_l0': self.initial_legitimacy_l0,\n                'inf_threshold': self.inf_threshold,\n                'max_iter': self.max_iter,\n                'max_jail_term': self.max_jail_term,\n                'active_threshold_t': self.active_threshold_t,\n                'k': self.k,\n                'graph_type': self.graph_type,\n            }\n            \n            name = self.path + 'ini_values.json'\n            a_file = open(name, \"w\")\n            json.dump(dictionary_data, a_file)\n            a_file.close()\n        else:\n            pass\n\n    def update_legitimacy(self):\n        \"\"\"\n        Compute legitimacy (Epstein Working Paper 2001)\n        \"\"\"\n        self.jailings_list[3] = self.jailings_list[2]\n        self.jailings_list[2] = self.jailings_list[1]\n        nb_active_and_quiescent = self.count_type_citizens(\"ACTIVE\") + self.count_type_citizens(\"QUIESCENT\")\n        self.jailings_list[1] = self.jailings_list[0] / (nb_active_and_quiescent + 1)  # + 1 to avoid division by zero\n        self.jailings_list[0] = 0\n\n        sum_jailed = self.jailings_list[1] - self.jailings_list[2] ** 2 - self.jailings_list[3] ** 3\n        self.legitimacy = self.initial_legitimacy_l0 * (1 - sum_jailed)\n        if self.legitimacy <= 0:\n            self.legitimacy = 0\n\n    def get_model_reporters(self):\n        \"\"\"\n        Dictionary of model reporter names and attributes/funcs\n        References to functions instead of lambdas are provided to handle the multiprocessing case.\n        Multiprocessing pool cannot directly handle lambda.\n        \"\"\"\n        return {\"QUIESCENT\": compute_quiescent,\n                \"ACTIVE\": compute_active,\n                \"JAILED\": compute_jailed,\n                \"LEGITIMACY\": compute_legitimacy,\n                \"INFLUENCERS\": compute_influencers,\n                \"OUTBREAKS\": compute_outbreaks}\n\n    def get_agent_reporters(self):\n        \"\"\"\n        Dictionary of agent reporter names and 
attributes/funcs\n \"\"\"\n\n return {\"Grievance\": \"grievance\",\n \"Hardship\": \"hardship\",\n \"State\": \"state\",\n \"Influencer\": \"influencer\",\n \"N_connections\": \"network_neighbors\",\n \"InfluencePi\": \"influence\"}\n\n def count_type_citizens(self, state_req):\n \"\"\"\n Helper method to count agents.\n Cop agents can't disappear from the map, so number of cops can be retrieved from model attributes.\n \"\"\"\n count = 0\n for agent in self.citizen_list:\n if type(agent).__name__.upper() == 'COP':\n continue\n if agent.jail_sentence and state_req == 'JAILED':\n count += 1\n else:\n if agent.state is State.ACTIVE and state_req == 'ACTIVE':\n count += 1\n elif agent.state == State.QUIESCENT and state_req == 'QUIESCENT':\n count += 1\n return count\n\n def remove_agent_grid(self, agent):\n \"\"\"\n Removes an agent from the grid.\n \"\"\"\n self.grid.remove_agent(agent)\n\n def add_jailed(self, agent):\n \"\"\"\n Un-jail an agent\n If the sentence of a jailed agent is over, place him back on a random empty cell in the grid.\n \"\"\"\n\n if len(self.grid.empties) == 0:\n raise Exception(\"There are no empty cells.\")\n\n new_pos = self.random.choice(list(self.grid.empties))\n self.grid.place_agent(agent, new_pos)\n\n def set_influencers(self, inf_threshold=150):\n \"\"\"\n If an agent in the network is connected to a large amount of nodes, this agent can\n be considered an influencer and receives a corresponding tag.\n :param inf_threshold: determine how many connections a node needs to be considered an influencer\n \"\"\"\n for agent in self.citizen_list:\n agent.set_influencer(len(list(self.G.neighbors(agent.network_node))), inf_threshold)\n if agent.influencer:\n self.influencer_list.append(agent)\n\n def remove_influencer(self):\n \"\"\"\n Removes a random agent with the influencer tag from the grid.\n Gives manual control over the model to evaluate the influence of influencers.\n \"\"\"\n if self.influencer_list:\n for i in range(len(self.influencer_list)):\n to_remove = self.random.choice(self.influencer_list)\n if to_remove.pos: # Check if influencer is jailed.\n self.grid.remove_agent(to_remove)\n self.influencer_list.remove(to_remove)\n self.citizen_list.remove(to_remove)\n self.schedule.remove(to_remove)\n self.G.remove_node(to_remove.network_node)\n\n def jail_influencer(self):\n \"\"\"\n Jail a random agent with the influencer tag from the grid.\n Gives manual control over the model to evaluate the influence of influencers.\n \"\"\"\n if self.influencer_list:\n for i in range(len(self.influencer_list)):\n arrestee = self.random.choice(self.influencer_list)\n if arrestee.state == State.JAILED: # Check if influencer is jailed.\n continue\n sentence = random.randint(1, self.max_jail_term)\n arrestee.jail_sentence = sentence\n arrestee.state = State.JAILED\n self.jailings_list[0] += 1\n if sentence > 0:\n self.remove_agent_grid(arrestee)\n\n print(arrestee.unique_id, ' was an influencer and has been jailed.')\n" }, { "alpha_fraction": 0.6338672637939453, "alphanum_fraction": 0.6430205702781677, "avg_line_length": 17.20833396911621, "blob_id": "9f65b43dc5ec2fca3e7592e40fec89a66e4bab7c", "content_id": "fc7011d2816c28d62b7be119522c4724300d02ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 75, "num_lines": 24, "path": "/civil_violence/constant_variables.py", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "from enum import 
Enum\n\nState = Enum('State', 'QUIESCENT ACTIVE JAILED COP')\n\nGraphType = Enum('GraphType', 'ERDOS_RENYI BARABASI_ALBERT WATTS_STROGATZ')\n\n\nclass Color(Enum):\n    QUIESCENT = \"lightblue\"\n    ACTIVE = \"red\"\n    JAILED = \"lightyellow\"\n    COP = \"black\"\n\n\nclass Shape(Enum):\n    CITIZEN = \"circle\"\n    COP = \"rect\"\n\n\nclass HardshipConst(Enum):\n    DISTANCE = .5\n    TIME_STEP = .1\n    TRANSMISSION_RATE = .5\n    HARDSHIP = 0\n" }, { "alpha_fraction": 0.5812597274780273, "alphanum_fraction": 0.5898133516311646, "avg_line_length": 36.82352828979492, "blob_id": "aae58a052546f7935a37c2826005b21af3aa8145", "content_id": "84c44c7864a88ba73451af0b05dc49f3ef5d9233", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5144, "license_type": "no_license", "max_line_length": 116, "num_lines": 136, "path": "/civil_violence/ofat_mp.py", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "import time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom batchrunner_mp import BatchRunnerMP\nfrom civil_violence_model import CivilViolenceModel\nfrom utils import *\n\n\ndef sensitive_analysis_no_network(problem, replicates=10, max_steps=200, distinct_samples=20, nr_processes=None):\n    \"\"\"\n    One-factor-at-a-time (OFAT) sensitivity analysis of the civil violence model without network (no bias)\n    Works with multiprocessing\n    :param problem: details of the variable parameters\n    :param replicates: number of replicates to be run\n    :param max_steps: Maximal number of steps of the simulations\n    :param distinct_samples: Number of samples per variable\n    :param nr_processes: number of CPUs to be used. If None, by default all available CPUs will be used.\n    \"\"\"\n\n    data = {}\n    run_data = {}\n\n    for i, var in enumerate(problem['names']):\n        # Get the bounds for this variable and get <distinct_samples> samples within this space (uniform)\n        samples = np.linspace(*problem['bounds'][i], num=distinct_samples)\n\n        # max_jail_term, agent_vision and cop_vision must be integers.\n        integer_param = ['max_jail_term', 'agent_vision', 'cop_vision']\n        if var in integer_param:\n            samples = np.linspace(*problem['bounds'][i], num=distinct_samples, dtype=int)\n\n        # Get default configuration\n        configuration = read_configuration('./configurations/ofat_no_network.json')\n        model_params = {}\n        model_params.update(configuration)  # Overwritten user parameters don't appear in the graphic interface\n        model_params.update({'seed': None})\n\n        # BatchRunnerMP used is a local modified version of the BatchRunnerMP class provided by mesa.\n        # It handles the multiprocessing issue which prohibited the use of lambda model reporters with run_all.\n        batch = BatchRunnerMP(CivilViolenceModel,\n                              nr_processes=nr_processes,\n                              max_steps=max_steps,\n                              iterations=replicates,\n                              variable_parameters={var: samples},\n                              fixed_parameters=model_params,\n                              model_reporters={'All_Data': compute_datacollector,  # multiprocessing pool can't handle\n                                               \"QUIESCENT\": compute_quiescent,  # lambda function.\n                                               \"ACTIVE\": compute_active,\n                                               \"JAILED\": compute_jailed,\n                                               \"OUTBREAKS\": compute_outbreaks,\n                                               \"INFLUENCERS\": compute_influencers,\n                                               \"LEGITIMACY\": compute_legitimacy},\n                              display_progress=True)\n\n        batch.run_all()\n\n        batch_df = batch.get_model_vars_dataframe()\n        batch_df = batch_df.drop('All_Data', axis=1)\n\n        data[var] = batch_df\n        run_data[var] = batch.get_collector_model()\n\n        # Uncomment to save data per parameter.\n        # path = 'archives/progress_data_ofat_{0}_{1}.npy'.format(var, int(time.time()))\n        # with open(path, 'ab') as f:\n        #     np.save(f, 
data)\n\n # Save final data\n path = 'archives/saved_data_ofat_{0}.npy'.format(int(time.time()))\n with open(path, 'ab') as f:\n np.save(f, data)\n\n run_path = path+'_run'\n with open(run_path, 'ab') as f:\n np.save(f, run_data)\n\n return data, run_data\n\n\ndef plot_param_var_conf(ax, df, var, param, i):\n \"\"\"\n Helper function for plot_all_vars. Plots the individual parameter vs\n variables passed.\n\n Args:\n ax: the axis to plot to\n df: dataframe that holds the data to be plotted\n var: variables to be taken from the dataframe\n param: which output variable to plot\n \"\"\"\n x = df.groupby(var).mean().reset_index()[var]\n y = df.groupby(var).mean()[param]\n\n replicates = df.groupby(var)[param].count()\n err = (1.96 * df.groupby(var)[param].std()) / np.sqrt(replicates)\n\n ax.plot(x, y, c='k')\n ax.fill_between(x, y - err, y + err)\n\n ax.set_xlabel(var)\n ax.set_ylabel(param)\n\n\ndef plot_all_vars(problem, df, param):\n \"\"\"\n Plots the parameters passed vs each of the output variables.\n\n Args:\n df: dataframe that holds all data\n param: the parameter to be plotted\n \"\"\"\n\n f, axs = plt.subplots(problem['num_vars'], figsize=(5, 10))\n for i, var in enumerate(problem['names']):\n plot_param_var_conf(axs[i], df[var], var, param, i)\n\n\ndef ofat_main():\n \"\"\"\n Main function of the one-factor-at-a-time sensitivity analysis.\n \"\"\"\n problem = {\n 'num_vars': 5,\n 'names': ['active_threshold_t', 'initial_legitimacy_l0',\n 'max_jail_term', 'agent_vision', 'cop_vision'],\n 'bounds': [[0.01, 1], [0.01, 1], [1, 100], [1, 20], [1, 20]]\n }\n\n data, run_data = sensitive_analysis_no_network(problem, 10, 200, 20, None)\n for param in (\"OUTBREAKS\", \"ACTIVE\", \"QUIESCENT\", \"JAILED\", \"INFLUENCERS\", \"LEGITIMACY\"):\n plot_all_vars(problem, data, param)\n plt.show()\n\n\nif __name__ == '__main__':\n ofat_main()\n" }, { "alpha_fraction": 0.6705050468444824, "alphanum_fraction": 0.6723232269287109, "avg_line_length": 35.13868713378906, "blob_id": "9bcf41a67f4c41d721b997e132524add8f8b7993", "content_id": "61fd0d500117ccaeaef61e79c5d6f3cb3740e6f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4950, "license_type": "no_license", "max_line_length": 109, "num_lines": 137, "path": "/civil_violence/graph_utils.py", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "import networkx as nx\nfrom mesa.visualization.modules import NetworkModule\nfrom constant_variables import GraphType\n\n\nclass NetworkModuleExtended(NetworkModule):\n \"\"\"\n NetworkModuleExtended is exactly NetworkModule class.\n Except it will provide the model as parameters instead of the graph G from model.G.\n This extended version is used to vizualise social network between agents\n while going around the limitation of mesa (implemented with the idea of using only one Space object).\n \"\"\"\n def render(self, model):\n return self.portrayal_method(model)\n\n\ndef generate_network(agent_list, graph_type, p, p_ws, directed=False, seed=None):\n \"\"\"\n Generate a network based on the provided parameters\n\n :param agent_list: List of agents to be added to the network\n :param p: Probability for edge creation\n :param p_ws: probability of rewiring each edge, used by Watts-Strogatz\n :param directed: True if directed, False if undirected\n :param seed: Indicator of random number generation state\n :param graph_type: constants to select a type of Graph.\n :return:\n \"\"\"\n\n if graph_type == 
GraphType.ERDOS_RENYI.name:\n        return generate_erdos_renyi(agent_list, p, directed, seed)\n\n    if graph_type == GraphType.BARABASI_ALBERT.name:\n        return generate_barabasi_albert(agent_list, p, seed)\n\n    if graph_type == GraphType.WATTS_STROGATZ.name:\n        return generate_watts_strogatz(agent_list, p, p_ws, seed)\n\n    # Default - no network\n    no_graph = nx.Graph()\n    network_dict = dict()\n    for idx, agent in enumerate(agent_list):\n        no_graph.add_node(idx)  # Agents are added to a graph with no edge (for compatibility)\n        agent.network_node = idx\n        network_dict[agent.network_node] = agent\n\n    return no_graph, network_dict\n\n\ndef generate_erdos_renyi(agent_list, p, directed=False, seed=None):\n    \"\"\"\n    Generate an Erdos Renyi graph. Add as many nodes as there are agents.\n    :param agent_list: List of agents (citizen)\n    :param p: probability of creating an edge\n    :param directed: True if graph is directed\n    :param seed: randomization seed\n    :return: networkx graph and dictionary mapping graph node to agent reference.\n    \"\"\"\n\n    num_nodes = len(agent_list)\n    graph = nx.generators.random_graphs.erdos_renyi_graph(num_nodes, p, seed, directed)\n    network_dict = dict()\n\n    agent_number = 0\n    for agent in agent_list:\n        # Set the localisation of the agent in the social network\n        agent.network_node = list(graph.nodes)[agent_number]\n        network_dict[agent.network_node] = agent\n        agent_number += 1\n    return graph, network_dict\n\n\ndef generate_barabasi_albert(agent_list, p, seed=None):\n    \"\"\"\n    Generate a Barabasi-Albert graph. Add as many nodes as there are agents.\n    :param agent_list: List of agents (citizen)\n    :param p: probability of creating an edge\n    :param seed: randomization seed\n    :return: networkx graph and dictionary mapping graph node to agent reference.\n    \"\"\"\n\n    num_nodes = len(agent_list)\n    m = int((p*num_nodes-1)/2)\n    graph = nx.generators.random_graphs.barabasi_albert_graph(num_nodes, m, seed)\n    network_dict = dict()\n\n    agent_number = 0\n    for agent in agent_list:\n        # Set the localisation of the agent in the social network\n        agent.network_node = list(graph.nodes)[agent_number]\n        network_dict[agent.network_node] = agent\n        agent_number += 1\n    return graph, network_dict\n\n\ndef generate_watts_strogatz(agent_list, p, p_ws, seed=None):\n    \"\"\"\n    Generate a Watts-Strogatz graph. 
Add as many nodes as there are agents.\n    :param agent_list: List of agents (citizen)\n    :param p: probability of creating an edge\n    :param p_ws: probability of rewiring each edge\n    :param seed: randomization seed\n    :return: networkx graph and dictionary mapping graph node to agent reference.\n    \"\"\"\n\n    num_nodes = len(agent_list)\n    k = int((num_nodes-1)*p)\n    graph = nx.generators.random_graphs.watts_strogatz_graph(num_nodes, k, p_ws, seed)\n    network_dict = dict()\n\n    agent_number = 0\n    for agent in agent_list:\n        # Set the localisation of the agent in the social network\n        agent.network_node = list(graph.nodes)[agent_number]\n        network_dict[agent.network_node] = agent\n        agent_number += 1\n    return graph, network_dict\n\n\ndef print_network(G, network_dict):\n    \"\"\"\n    Simple tool to print the population agent's network graph\n    :param G: a graph from networkx module\n    :param network_dict: map between node id and agent\n    \"\"\"\n\n    print(\"######### Network #########\")\n\n    for n in list(G.nodes):\n        print(\"======\")\n        neighbours = nx.all_neighbors(G, n)\n        agent = network_dict[n]\n        print(\"{} Agent {} localized at Node {} connected to :\".format(str(type(agent)), agent.unique_id, n))\n        for m in neighbours:\n            print(\"-- Agent {}\".format(m))\n\n    print(\"###########################\")" }, { "alpha_fraction": 0.59822016954422, "alphanum_fraction": 0.6143704652786255, "avg_line_length": 28.456310272216797, "blob_id": "52a62a36161244330158ef988598a44ed5e2d0ef", "content_id": "98f10ed01590e81a765c8abbd880d449ec62c644", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3034, "license_type": "no_license", "max_line_length": 91, "num_lines": 103, "path": "/civil_violence/ofat_plot.py", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\nx_labels = dict({\n    \"active_threshold_t\": \"Active threshold\",\n    \"initial_legitimacy_l0\": \"Initial legitimacy\",\n    \"max_jail_term\": \"Max jail term\",\n    \"agent_vision\": \"Citizen vision\",\n    \"cop_vision\": \"Cop vision\",\n})\n\ny_labels = dict({\n    \"OUTBREAKS\": \"Number of outbreaks\",\n    \"ACTIVE\": \"Number of active citizens\",\n    \"QUIESCENT\": \"Number of quiescent citizens\",\n    \"JAILED\": \"Number of jailed citizens\",\n    \"INFLUENCERS\": \"Number of influencers\",\n    \"LEGITIMACY\": \"Central authority legitimacy\"\n})\n\n\ndef plot_param_var_conf(ax, df, var, param, i):\n    \"\"\"\n    Helper function for plot_all_vars. 
Plots the individual parameter vs\n    variables passed.\n\n    Args:\n        ax: the axis to plot to\n        df: dataframe that holds the data to be plotted\n        var: variables to be taken from the dataframe\n        param: which output variable to plot\n    \"\"\"\n    x = df.groupby(var).mean().reset_index()[var]\n    y = df.groupby(var).mean()[param]\n\n    replicates = df.groupby(var)[param].count()\n    err = (1.96 * df.groupby(var)[param].std()) / np.sqrt(replicates)\n\n    ax.plot(x, y, c='k')\n    ax.fill_between(x, y - err, y + err)\n\n    x_label = x_labels[var]\n    y_label = y_labels[param]\n\n    ax.tick_params(axis='both', which='major', labelsize=6)\n    ax.set_xlabel(x_label, size=7)\n\n    if i == 2:\n        ax.set_ylabel(y_label, size=9)\n\n\ndef plot_all_vars(problem, df, param):\n    \"\"\"\n    Plots the parameters passed vs each of the output variables.\n\n    Args:\n        problem: details of the data processed for each parameter\n        df: dataframe that holds all data\n        param: the parameter to be plotted\n    \"\"\"\n\n    f, axs = plt.subplots(5, figsize=(3, 5), dpi=300)\n\n    for i, var in enumerate(problem['names']):\n        plot_param_var_conf(axs[i], df[var], var, param, i)\n\n\ndef load_plot_archive(problem, file_paths):\n    \"\"\"\n    Utility to load one-factor-at-a-time sensitivity analysis archived data.\n    Running sensitivity analysis takes a long time, so results are saved in archived files.\n    In this file, the loaded data are plotted.\n\n    :param problem: details of the data processed for each parameter\n    :param file_paths: paths to the archived data\n    \"\"\"\n\n    for path in file_paths:\n        with open(path, 'rb') as f:\n            data = np.load(f, allow_pickle=True)[()]\n\n        for param in y_labels.keys():\n            plot_all_vars(problem, data, param)\n            plt.show()\n\n\nif __name__ == '__main__':\n\n    # OFAT analysis for civil violence model without network\n    file_paths = [\n        # './archives/saved_data_1611773618.npy',  # Same as saved_data_local_SA.npy\n        './archives/saved_data_local_SA.npy',\n    ]\n\n    problem = {\n        'num_vars': 5,\n        'names': ['active_threshold_t', 'initial_legitimacy_l0',\n                  'max_jail_term', 'agent_vision', 'cop_vision'],\n        'bounds': [[0.01, 1], [0.01, 1], [1, 100], [1, 20], [1, 20]]\n    }\n\n    load_plot_archive(problem, file_paths)\n" }, { "alpha_fraction": 0.6133963465690613, "alphanum_fraction": 0.6254863739013672, "avg_line_length": 34.27450942993164, "blob_id": "5697b1f300f54f655158e1f5de5efb66f665d72e", "content_id": "12d25955c444857cb1d7bedb0f990b19dc4873f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7196, "license_type": "no_license", "max_line_length": 119, "num_lines": 204, "path": "/civil_violence/ofat_post_processing.py", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\n\n'''\nREADME:\nTo run this script, please follow the following steps:\n    1. Adjust the following constants such that they correspond to those in the SA.py file. \n    2. Run the ofat_mp.py script. \n    3. Add the paths to the output files from the ofat_mp.py to the file_paths list (line27).\n    4. Depending on whether the output file is a Sensitivity Analysis (multiple dictionaries), \n       or a fixed run (single dictionary), several lines need to be commented in/out. \n    5. Run the ofat_post_processing.py script. 
\n'''\n\nTHRESHOLD = 50\nITERATIONS = 10\nSTEPS = 20\nN_PARAMS = 6\nN_OUTPUT = 6\n\nPARAMS = ['active_threshold_t', 'initial_legitimacy_l0', 'max_jail_term', 'p', 'agent_vision', 'cop_vision']\nBOUNDS = [[0.01, 1], [0.01, 1], [1, 100], [0.01, 0.4], [1, 20], [1, 20]]\nOUTPUT_PARAMS = ['PARAM_VAL', 'MEAN_N', 'MEAN_PEAK_HEIGHT', 'MEAN_PEAK_WIDTH', 'MAX_PEAK_HEIGHT', 'MAX_PEAK_WIDTH']\n\nfile_paths = ['./archives/saved_data_1611773618.npy', './archives/saved_data_1611773618_run.npy']\n\n\ndef load_datacollector():\n \"\"\"\n Loads in the data from the specified paths into different dictionaries.\n \"\"\"\n data_dict = {}\n\n for path in file_paths:\n with open(path, 'rb+') as f:\n\n data = np.load(f, allow_pickle = True)[()]\n keys = data.keys()\n data_dict[path] = data\n \n return data_dict\n\n\ndef map_keys():\n \"\"\"\n Maps the used parameter bounds to key values for the data dictionary. \n Keys are of the form: (Parameter value, iteration)\n \"\"\"\n keys_dict = {}\n # iter_list = np.arange(ITERATIONS*STEPS).reshape((STEPS, ITERATIONS)) # In case of multiple exported dictionaries.\n iter_list = np.arange(ITERATIONS)\t# In case of a single exported dictionary\n for i in range(len(PARAMS)):\n param_list = []\n if PARAMS[i] == 'max_jail_term':\n param_range = np.linspace(BOUNDS[i][0], BOUNDS[i][1], STEPS, dtype=np.int32).reshape(STEPS, 1)\n else:\n param_range = np.linspace(BOUNDS[i][0], BOUNDS[i][1], STEPS).reshape(STEPS, 1)\n param_matrix = np.repeat(param_range, ITERATIONS, axis=1)\n for j in range(STEPS):\n # param_list.extend(list(zip(param_matrix[j], iter_list[j]))) # In case of multiple exported dictionaries.\n param_list.extend(list(zip(param_matrix[j], iter_list))) # In case of a single exported dictionary.\n keys_dict[PARAMS[i]] = param_list\n\n return keys_dict\n\n\ndef get_param_means(data, parameter):\n \"\"\"\n For a provided parameter, calculates the determined output values for every parameter/iteration\n configuration.\n\n Returns a dataframe with the output type as column names. 
Every row contains the mean values of \n    the set amount of iterations for every parameter configuration.\n    \"\"\"\n    # Initialize outputs\n    output = np.zeros((STEPS, N_OUTPUT)) \n    keys = keys_dict[parameter]\n\n    # Divide the key list in the different step sizes of the parameter.\n    for i in range(STEPS):\n        peak_heights = []\n        peak_widths = []\n        s_keys = keys[i*ITERATIONS : (i+1)*ITERATIONS]\n        \n        # Every key is an iteration\n        for key in s_keys:\n            actives = data[parameter][key]['ACTIVE']\n            ph, pw = get_outbreaks(actives, THRESHOLD)\n            peak_heights.extend(ph)\n            peak_widths.extend(pw)\n        \n        # Output calculation\n        mean_n_peaks = len(peak_heights)/ITERATIONS\n        mean_peak_height = np.mean(np.array(peak_heights))\n        mean_peak_width = np.mean(peak_widths)\n        max_peak_height = np.max(peak_heights)\n        max_peak_width = np.max(peak_widths)\n\n        output[i] = [key[0], mean_n_peaks, mean_peak_height, mean_peak_width, max_peak_height, max_peak_width]\n\n    output_df = pd.DataFrame({\n        'PARAM_VAL': output[:, 0],\n        'MEAN_N': output[:, 1],\n        'MEAN_PEAK_HEIGHT': output[:, 2],\n        'MEAN_OUTBREAK_DURATION': output[:, 3],\n        'MAX_PEAK_HEIGHT': output[:, 4],\n        'MAX_OUTBREAK_DURATION': output[:, 5]})\n    \n    return output_df  # Can also return 'output' if the data is wanted in array form.\n\n\ndef get_outbreaks(data, threshold):\n    \"\"\"\n    Calculates the outbreaks from the actives data based on a certain threshold.\n    The last codeblock defines the behavior when the provided data ends in an outbreak.\n    Depending on the user, they might want to include/exclude that final outbreak.\n\n    Returns:\n        - An array with the peak size of every outbreak.\n        - An array with the outbreak durations.\n    \"\"\"\n    outbreak_peaks = []\n    outbreak_widths = []\n    counting = False\n    current_peak = 0\n    start = 0\n\n    for i in range(len(data)):\n\n        if data[i] >= threshold and not counting:\n            counting = True\n            if current_peak < data[i]:\n                current_peak = data[i]\n            start = i\n\n        elif data[i] >= threshold and counting:\n            if current_peak < data[i]:\n                current_peak = data[i]\n\n        elif data[i] < threshold and counting:\n            outbreak_peaks.append(current_peak)\n            outbreak_widths.append(i-start)\n            current_peak = 0\n            counting = False\n\n    if not outbreak_peaks and not counting:  # Captures data without outbreaks, empty lists break further calculations.\n        outbreak_peaks.append(0)\n        outbreak_widths.append(0)\n\n    # Capture cases where timeline ends in an outbreak.\n    # Uncomment if final outbreak needs to be included in calculation.\n    # Obviously skews the data, but might be preferable over 0 or infinite outbreaks.\n    \n    # if not outbreak_peaks and counting:  # Data is a single massive outbreak.\n    #     outbreak_peaks.append(current_peak)\n    #     outbreak_widths.append(len(data))\n    # elif outbreak_peaks and counting:  # Data ends in an outbreak.\n    #     outbreak_peaks.append(current_peak)\n    #     outbreak_widths.append(len(data)-start)\n    \n    return outbreak_peaks, outbreak_widths\n\n\ndef fix_keys(dictionary):\n    \"\"\"\n    This function trims the dictionary keys from ([all parameters], iteration) to a more manageable\n    ('changed parameter', iteration) format.\n    Like in the SA.py file, the 'max_jail_term'-parameter steps are cast to int.\n    \"\"\"\n    new_dictionary = {}\n    for k in dictionary:\n        # 'max_jail_term' is the only integer-valued parameter, so detect it by the key's type.\n        # (The previous check `dictionary == 'max_jail_term'` compared the dict itself to a\n        # string and was therefore always False.)\n        if isinstance(k[0], (int, np.integer)):\n            new_k = (int(k[0]), k[-1])\n        else:\n            new_k = (k[0], k[-1])\n        new_dictionary[new_k] = dictionary[k]\n    return new_dictionary\n\n\ndef save_csv(name, df):\n    \"\"\"\n    Save an output dataframe as a CSV file in the archives folder.\n    \"\"\"\n    path = 'archives/'+name+'_out.csv'\n    
df.to_csv(path)\n\n\nif __name__ == '__main__':\n # Run the script\n model_data = load_datacollector()\n run_data = model_data[file_paths[1]] # Step-wise DataCollector is only captured in the *_run.npy files.\n\n for dic in run_data:\n run_data[dic] = fix_keys(run_data[dic])\n\n keys_dict = map_keys()\n output_data = {}\n\n # Output calculation\n for param in run_data.keys():\n output_data[param] = get_param_means(run_data, param)\n # Saving output\n for df in output_data:\n save_csv(str(df), output_data[df])\n" }, { "alpha_fraction": 0.4685039222240448, "alphanum_fraction": 0.5511810779571533, "avg_line_length": 15.933333396911621, "blob_id": "5867c1bef1b10ab1ae3422ee446b86932a64961d", "content_id": "8ad67d6696b8b42b18e848b53000bd7e485d2a49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 254, "license_type": "no_license", "max_line_length": 38, "num_lines": 15, "path": "/civil_violence/Pipfile", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "[[source]]\nurl = \"https://pypi.python.org/simple\"\nverify_ssl = true\nname = \"pypi\"\n\n[packages]\nmesa = \"~=0.8.8\"\nnetworkx = \"~=2.5\"\npandas = \"~=1.1\"\nnumpy = \"~=1.19\"\nmatplotlib = \"~=3.3\"\nseaborn = \"~=0.11\"\njupyter = \"~=1.0\"\nipynb = \"~=0.5\"\nsalib = \"~=1.3\"\n" }, { "alpha_fraction": 0.6223821640014648, "alphanum_fraction": 0.6269950866699219, "avg_line_length": 38.70329666137695, "blob_id": "ab3ecc7c7e7cbcdc5f10cd2a0c443ffcc82f4656", "content_id": "e3db3bf6da78cd69282075bd5b403668c988c010", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10839, "license_type": "no_license", "max_line_length": 119, "num_lines": 273, "path": "/civil_violence/civil_violence_agents.py", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "import random\nimport math\nfrom mesa import Agent\nfrom constant_variables import State, HardshipConst\n\n\nclass Citizen(Agent):\n \"\"\"\n A citizen agent, part of the population.\n \"\"\"\n\n def __init__(self, unique_id, model, pos, hardship, susceptibility, influence, expression_intensity, \n legitimacy, risk_aversion, threshold, vision, jailable=True):\n \"\"\"\n Create a new citizen agent.\n\n Attributes:\n :param unique_id: unique id of the agent\n :param model: model to which agent belongs to\n :param pos: position of the agent in the space\n :param hardship: agent's perceived hardship, sum of endogenous and contagious hardship\n :param susceptibility: How susceptible this agent is to contagious hardship\n :param influence: How influentual this agent is to other agents\n :param expression_intensity: How strongly this agent expresses their hardship\n :param legitimacy: legitimacy of the central authority\n :param risk_aversion: agent's level risk aversion\n :param threshold: threshold beyond which agent become active\n :param vision: number of cells visible for each direction (N/S/E/W)\n :param state: Model state of the agent\n :param jailable: Flag that indicates if an agent can get arrested\n\n Other attributes:\n hardship_endo: endogenous hardship\n hardship_cont: contagious hardship\n network_node : agent's node_id in the graph representing the social network\n state: current state of the agent (default: Quiescent)\n jail_sentence: current jail sentence of the agent (default: 0)\n neighbors: List of neighbors in agent vision\n empty_cells: List of empty cells in agent vision\n \"\"\"\n\n super().__init__(unique_id, 
model)\n random.seed(model.seed)\n\n self.pos = pos # Position in MultiGrid space\n self.network_node = 0 # Position in graph\n\n self.hardship = hardship # Set equal to U(0, 1) for initialization\n self.hardship_endo = hardship\n self.hardship_cont = 0\n self.susceptibility = susceptibility\n self.influence = influence\n self.expression_intensity = expression_intensity\n self.legitimacy = legitimacy\n self.risk_aversion = risk_aversion\n self.threshold = threshold\n self.vision = vision\n self.jail_sentence = 0\n self.grievance = self.get_grievance()\n\n self.jailable = jailable\n self.influencer = False\n\n self.network_neighbors = [] # Neighbors in social network\n self.neighbors = [] # Neighbors in MultiGrid space\n self.empty_cells = [] # Empty cells around the agent in MultiGrid space\n\n self.state = State.ACTIVE if threshold == 0 else State.QUIESCENT\n\n def step(self):\n \"\"\"\n Citizen agent rules (Epstein 2002 model)\n \"\"\"\n\n # Jailed agent can't perform any action\n # After sentence resets state and contagious hardship\n if self.jail_sentence:\n self.jail_sentence -= 1\n\n if self.jail_sentence == 0:\n self.state = State.QUIESCENT # Jailed agent returns quiescent\n self.hardship_cont = 0\n self.model.add_jailed(self)\n return\n\n self.hardship = self.update_hardship()\n self.get_network_neighbors()\n self.update_neighbors() # Should we run this at each turn instead of retrieving the neighbors when necessary ?\n\n self.grievance = self.get_grievance()\n rule_a = self.grievance - self.get_net_risk() > self.threshold\n if self.state is State.QUIESCENT and rule_a:\n self.state = State.ACTIVE\n elif self.state is State.ACTIVE and not rule_a:\n self.state = State.QUIESCENT\n\n # Move agent in the 2D Grid\n if self.model.movement and self.empty_cells:\n new_pos = random.choice(self.empty_cells)\n self.model.grid.move_agent(self, new_pos)\n\n def update_neighbors(self):\n \"\"\"\n Keep track of neighbours and empty surrounding cells.\n \"\"\"\n\n # Moore = False because we check N/S/E/W\n neighborhood = self.model.grid.get_neighborhood(self.pos, moore=False, radius=self.vision)\n self.neighbors = self.model.grid.get_cell_list_contents(neighborhood)\n self.empty_cells = [c for c in neighborhood if self.model.grid.is_cell_empty(c)]\n\n def get_arrest_probability(self):\n \"\"\"\n Compute the arrest probability P of the agent (expanded from Epstein 2002 model)\n round of (C_v / (A_v + 1) is suggested to have more active agents in the model.\n Removing it might make difficult actual outbreaks.\n\n :return: 1 - exp(-k * C_v / int(A_v + 1))\n \"\"\"\n c_v = sum(isinstance(n, Cop) for n in self.neighbors)\n a_v = sum(isinstance(n, Citizen) and n.state is State.ACTIVE for n in self.neighbors)\n cop_to_agent_ratio = int(c_v / (a_v + 1)) # This modification is suggested to get active agent more easily\n return 1 - math.exp(-1 * self.model.k * cop_to_agent_ratio) # Rounding to min integer\n\n def get_net_risk(self):\n \"\"\"\n Compute the agent's net risk N (Epstein 2002 model)\n :return: R * P\n \"\"\"\n return self.risk_aversion * self.get_arrest_probability()\n\n def get_grievance(self):\n \"\"\"\n Compute the agent's grievance (Epstein 2002 model).\n Also works with the ABEC-model described in Huang et al. 
(2018) since only hardship is calculated differently.\n :return: H(1 - L)\n \"\"\"\n return self.hardship * (1 - self.model.legitimacy)\n\n def update_hardship(self):\n \"\"\"\n Hardship in the ABEC-model consists of endogenous hardship (U(0, 1) as the Epstein model)\n and contagious hardship which is updated at every timestep.\n\n Updates the contagious hardship and the perceived hardship.\n \"\"\"\n \n if self.hardship < 1:\n received_hardship = self.get_received_hardship()\n self.hardship_cont += received_hardship\n\n hardship = self.hardship_cont + self.hardship_endo\n\n # Ensure hardship has a maximum value of 1\n if hardship > 1:\n return 1\n else:\n return hardship\n\n def get_received_hardship(self, hardship_params=HardshipConst):\n \"\"\"\n Calculates the received contagious hardship of an agent by its neighbors.\n Is a product of various endo- and exogenous parameters.\n Transmission_rate is a parameter we can consider setting fixed because it is not of importance to our\n project, but removing it will increase the received hardship.\n Timestep, or delta_time, can also be considered fixed in this discrete time model.\n Distance is a parameter that is more or less incorporated in NetworkX, so perhaps set this fixed as well.\n :param hardship_params: default values\n \"\"\"\n # Fixed values for parameters\n distance = hardship_params.DISTANCE.value\n timestep = hardship_params.TIME_STEP.value\n transmission_rate = hardship_params.TRANSMISSION_RATE.value\n hardship = hardship_params.HARDSHIP.value\n\n for n in self.model.G.neighbors(self.network_node): # Network neighbors\n agent = self.model.network_dict[n] # Get agent at the neighbor node\n if agent.state == State.ACTIVE: # If the agent is active state\n hardship += (distance * timestep * transmission_rate * \n agent.influence * agent.expression_intensity * self.susceptibility)\n\n return hardship\n\n def set_influencer(self, connections, threshold):\n \"\"\"\n Determine if civilian agent is an influencer\n \"\"\"\n if connections > threshold:\n self.influencer = True\n else: \n self.influencer = False\n\n def get_network_neighbors(self):\n \"\"\"\n Retrieve neighbors to this agent in the social network.\n \"\"\"\n\n self.network_neighbors = (list(self.model.G.neighbors(self.network_node)))\n\n\nclass Cop(Agent):\n \"\"\"\n Create a new law enforcement officer agent.\n \"\"\"\n\n def __init__(self, unique_id, model, pos, vision):\n \"\"\"\n Create a new law enforcement officer agent.\n :param unique_id: unique id of the agent\n :param model: model to which agent belongs to\n :param pos: position of the agent in the space\n :param vision: number of cells visible for each direction (N,S,E,W)\n \"\"\"\n\n super().__init__(unique_id, model)\n self.unique_id = unique_id\n self.model = model\n self.pos = pos\n self.vision = vision\n self.state = State.COP\n\n # Data collector fix\n self.hardship = None\n self.grievance = None\n self.influencer = None\n self.network_neighbors = None\n self.influence = None\n\n self.neighbors = [] # Neighbors in MultiGrid space\n self.empty_cells = [] # Empty cells around the agent in MultiGrid space\n\n def step(self):\n \"\"\"\n Inspect vision and arrest a random agent. 
Move there\n \"\"\"\n self.update_neighbors()\n active_neighbors = []\n \n # Check for all active neighbors in vision\n for agent in self.neighbors:\n if type(agent).__name__.upper() == 'CITIZEN' \\\n and agent.state is State.ACTIVE \\\n and agent.jail_sentence == 0 \\\n and agent.jailable:\n active_neighbors.append(agent)\n\n # If there are any active arrest one randomly and move there\n if active_neighbors:\n arrestee = random.choice(active_neighbors)\n sentence = random.randint(1, self.model.max_jail_term)\n arrestee.jail_sentence = sentence\n arrestee.state = State.JAILED\n new_pos = arrestee.pos\n self.model.jailings_list[0] += 1\n\n if sentence > 0:\n self.model.remove_agent_grid(arrestee)\n if self.model.movement:\n self.model.grid.move_agent(self, new_pos)\n\n # No active citizens, move to random empty cell\n elif self.model.movement and self.empty_cells:\n new_pos = random.choice(self.empty_cells)\n self.model.grid.move_agent(self, new_pos)\n\n def update_neighbors(self):\n \"\"\"\n Create a list of neighbors & empty neighbor cells\n \"\"\"\n # Moore = False because we check N/S/E/W\n neighborhood = self.model.grid.get_neighborhood(self.pos, moore=False, radius=self.vision)\n self.neighbors = self.model.grid.get_cell_list_contents(neighborhood)\n self.empty_cells = [c for c in neighborhood if self.model.grid.is_cell_empty(c)]\n" }, { "alpha_fraction": 0.5617470145225525, "alphanum_fraction": 0.5786897540092468, "avg_line_length": 28.175825119018555, "blob_id": "accb9f496bf5d126d98d673eae89334742abdd15", "content_id": "54a518d015b42a2bebdd428d35982be10b0e33b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2656, "license_type": "no_license", "max_line_length": 111, "num_lines": 91, "path": "/civil_violence/graphics_portrayal.py", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "from utils import linear_gradient\nfrom constant_variables import State, Shape, Color\nfrom civil_violence_agents import Citizen\n\n# we generate an array of hex values in between start and end hex values\n# in order to represent agents with an array of susceptibility values\n# in the grid\ngrad_grievance = linear_gradient(\"#FFD1D7\", \"#860110\", n=100)['hex']\n\n\ndef get_agent_portrayal(agent):\n \"\"\"\n Generate a portrayal of the agent (citizen, cop, etc.(\n :param agent: agent to be portrayed\n :return: json-ready dictionary\n \"\"\"\n\n portrayal = {\n \"Shape\": Shape[type(agent).__name__.upper()].value,\n \"x\": agent.pos[0], \"y\": agent.pos[1],\n \"Filled\": \"true\",\n \"Color\": Color[agent.state.name].value,\n \"r\": .8,\n \"w\": 0.7,\n \"h\": 0.7,\n \"Layer\": 0,\n \"Agent\": agent.unique_id,\n }\n\n return portrayal\n\n\ndef get_network_portrayal(model):\n \"\"\"\n Generate a portrayal (JSON-ready dictionary used by the relevant JavaScript code (sigma.js) to draw shapes)\n :param graph: Generated networkx graph representing social network\n :return:\n \"\"\"\n\n portrayal = dict()\n portrayal[\"nodes\"] = [\n {\n # Main attributes\n \"id\": agent.network_node,\n \"label\": \"{}\".format(agent.unique_id),\n # Display attributes\n \"size\": 3,\n \"color\": Color[agent.state.name].value,\n }\n for agent in model.citizen_list\n ]\n\n portrayal[\"edges\"] = [\n {\n \"id\": edge_id,\n \"source\": source,\n \"target\": target,\n \"color\": Color.JAILED if model.network_dict[source].state == State.JAILED\n or model.network_dict[target].state == State.JAILED\n else \"#000000\"\n }\n for edge_id, (source, 
target) in enumerate(model.G.edges)\n ]\n\n return portrayal\n\n\ndef get_grievance_portrayal(agent):\n \"\"\"\n Generate a portrayal of the agent grievance\n (JSON-ready dictionary used by the relevant JavaScript code (sigma.js) to draw shapes)\n :param agent: agent which grievance must be to portrayed\n :return: json-ready dictionary\n \"\"\"\n portrayal = {\n \"Shape\": \"rect\",\n \"x\": agent.pos[0], \"y\": agent.pos[1],\n \"Filled\": \"true\",\n \"Color\": \"#000000\",\n \"w\": 0.7,\n \"h\": 0.7,\n \"Layer\": 1,\n \"Agent\": agent.unique_id,\n }\n\n if isinstance(agent, Citizen):\n grievance_value = int(agent.grievance * 100)\n portrayal[\"Color\"] = grad_grievance[grievance_value]\n portrayal[\"Grievance\"] = int(agent.grievance * 100)\n\n return portrayal\n\n" }, { "alpha_fraction": 0.6135095953941345, "alphanum_fraction": 0.6149556040763855, "avg_line_length": 43.210044860839844, "blob_id": "71fc687d2de4db4ef264fc4e65944d14ebc66108", "content_id": "4f9441e66b2248b75df4d9bfd9794596f4cb7da2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9682, "license_type": "no_license", "max_line_length": 120, "num_lines": 219, "path": "/civil_violence/batchrunner_mp.py", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "from mesa.batchrunner import BatchRunner\nfrom multiprocessing import Pool, cpu_count\nfrom tqdm import tqdm\nfrom collections import OrderedDict\n\n\nclass BatchRunnerMP(BatchRunner):\n \"\"\"\n Child class of BatchRunner, extended with multiprocessing support.\n\n BatchRunnerMP bug handling method:\n This local class overwrite BatchRunnerMP class provided by mesa.\n It resolves the bug making not possible to use \"run_all\" method for sensitivity analysis.\n Note: You must give function name instead of lambda in data collector argument. 
Lambda can't be handle by\n multiprocessing pool, however mesa can collect data from model by simply indicated the function name.\n With this method you do not need to update datacollector class to make multiprocessing run.\n \"\"\"\n\n def __init__(self, model_cls, nr_processes=None, **kwargs):\n \"\"\"Create a new BatchRunnerMP for a given model with the given\n parameters.\n\n model_cls: The class of model to batch-run.\n nr_processes: int\n the number of separate processes the BatchRunner\n should start, all running in parallel.\n kwargs: the kwargs required for the parent BatchRunner class\n \"\"\"\n if nr_processes is None:\n # identify the number of processors available on users machine\n available_processors = cpu_count()\n self.processes = available_processors\n print(\"BatchRunner MP will use {} processors.\".format(self.processes))\n else:\n self.processes = nr_processes\n\n super().__init__(model_cls, **kwargs)\n self.pool = Pool(self.processes)\n\n def _make_model_args_mp(self):\n \"\"\"Prepare all combinations of parameter values for `run_all`\n Due to multiprocessing requirements of @StaticMethod takes different input, hence the similar function\n Returns:\n List of list with the form:\n [[model_object, dictionary_of_kwargs, max_steps, iterations]]\n \"\"\"\n total_iterations = self.iterations\n all_kwargs = []\n\n count = len(self.parameters_list)\n if count:\n for params in self.parameters_list:\n kwargs = params.copy()\n kwargs.update(self.fixed_parameters)\n # run each iterations specific number of times\n for iter in range(self.iterations):\n kwargs_repeated = kwargs.copy()\n all_kwargs.append(\n [\n self.model_cls,\n kwargs_repeated,\n self.max_steps,\n iter,\n self.model_reporters, # We add model_reporters and agent_reporters in order to by-pass\n self.agent_reporters # the impossibility to transmit model in multi-processing pool\n ]\n )\n\n elif len(self.fixed_parameters):\n count = 1\n kwargs = self.fixed_parameters.copy()\n all_kwargs.append(kwargs)\n\n total_iterations *= count\n\n return all_kwargs, total_iterations\n\n @staticmethod\n def _run_wrappermp(iter_args):\n \"\"\"\n Based on requirement of Python multiprocessing requires @staticmethod decorator;\n this is primarily to ensure functionality on Windows OS and does not impact MAC or Linux distros\n\n BatchRunnerMP bug handling method:\n Instead of transmitting the model (like in original BatchRunnerMP) which is not possible in multiprocessing\n pool, we obtain the necessary data (data_collector, model_var, agent_var) in upstream.\n Note: we should give a function name instead of a lambda to [data_collector, model_var, agent_var] object since\n lambda can't be handled by multiprocessing pool.\n\n :param iter_args: List of arguments for model run\n iter_args[0] = model object\n iter_args[1] = key word arguments needed for model object\n iter_args[2] = maximum number of steps for model\n iter_args[3] = number of time to run model for stochastic/random variation with same parameters\n :return:\n tuple of param values which serves as a unique key for model results\n model object\n \"\"\"\n\n model_i = iter_args[0]\n kwargs = iter_args[1]\n max_steps = iter_args[2]\n iteration = iter_args[3]\n model_reporters = iter_args[4] # Received in the arguments from _make_model_args_mp. 
By getting reporter values\n agent_reporters = iter_args[5] # here, we don't need to pass model class in multi-processing pool\n\n # instantiate version of model with correct parameters\n model = model_i(**kwargs)\n while model.running and model.schedule.steps < max_steps:\n model.step()\n\n # add iteration number to dictionary to make unique_key\n kwargs[\"iteration\"] = iteration\n\n # convert kwargs dict to tuple to make consistent\n param_values = tuple(kwargs.values())\n data_collector = None\n model_var = OrderedDict()\n agent_var = OrderedDict()\n\n if hasattr(model, \"datacollector\"):\n data_collector = model.datacollector\n\n if model_reporters:\n\n for var, reporter in model_reporters.items():\n model_var[var] = reporter(model)\n\n if agent_reporters:\n for agent in model.schedule._agents.values():\n agent_record = OrderedDict()\n for var, reporter in agent_reporters.items():\n agent_record[var] = getattr(agent, reporter)\n agent_var[agent.unique_id] = agent_record\n\n # Instead of transmitting the model (like in original BatchRunnerMP) which is not possible in multiprocessing\n # pool, we obtain the necessary data (data_collector, model_var, agent_var) in upstream.\n # Note: we should give a function name instead of a lambda to [data_collector, model_var, agent_var] since\n # lambda can't be handled by multiprocessing pool.\n return param_values, data_collector, model_var, agent_var\n\n def _result_prep_mp(self, results):\n \"\"\"\n Helper Function\n Takes results from Processpool and single processor debug run and fixes format to\n make compatible with BatchRunner Output\n\n BatchRunnerMP bug handling method:\n Model object can't be transmitted through multiprocessing pool, upstream data processing enable us to solve\n this issue.\n\n :param results: A tuple of datacollector, model_var and agent_var pre-processed data.\n :updates model_vars and agents_vars so consistent across all batchrunner\n \"\"\"\n # Take results and convert to dictionary so dataframe can be called\n for model_key, (datacollector, model_var, agent_var) in results.items():\n if self.model_reporters:\n self.model_vars[model_key] = model_var # Fix to original BatchRunnerMP\n\n if self.agent_reporters:\n agent_vars = agent_var\n for agent_id, reports in agent_vars.items():\n agent_key = model_key + (agent_id,)\n self.agent_vars[agent_key] = reports # Fix to original BatchRunnerMP\n\n if datacollector is not None:\n if datacollector.model_reporters is not None:\n self.datacollector_model_reporters[\n model_key\n ] = datacollector.get_model_vars_dataframe()\n if datacollector.agent_reporters is not None:\n self.datacollector_agent_reporters[\n model_key\n ] = datacollector.get_agent_vars_dataframe()\n\n # Make results consistent\n if len(self.datacollector_model_reporters.keys()) == 0:\n self.datacollector_model_reporters = None\n if len(self.datacollector_agent_reporters.keys()) == 0:\n self.datacollector_agent_reporters = None\n\n def run_all(self):\n \"\"\"\n Run the model at all parameter combinations and store results,\n overrides run_all from BatchRunner.\n \"\"\"\n\n run_iter_args, total_iterations = self._make_model_args_mp()\n # register the process pool and init a queue\n # store results in ordered dictionary\n results = {}\n\n if self.processes > 1:\n with tqdm(total_iterations, disable=not self.display_progress) as pbar:\n # (data_collector, model_var, agent_var) replace model variable which can't be transmitted\n for params, data_collector, model_var, agent_var in self.pool.imap_unordered(\n 
self._run_wrappermp, run_iter_args\n ):\n results[params] = (data_collector, model_var, agent_var)\n pbar.update()\n\n self._result_prep_mp(results)\n # For debugging model due to difficulty of getting errors during multiprocessing\n else:\n for run in run_iter_args:\n params, data_collector, model_var, agent_var = self._run_wrappermp(run)\n results[params] = (data_collector, model_var, agent_var)\n\n self._result_prep_mp(results)\n\n # Close multi-processing\n self.pool.close()\n\n return (\n getattr(self, \"model_vars\", None),\n getattr(self, \"agent_vars\", None),\n getattr(self, \"datacollector_model_reporters\", None),\n getattr(self, \"datacollector_agent_reporters\", None),\n )\n" }, { "alpha_fraction": 0.601844072341919, "alphanum_fraction": 0.6132997870445251, "avg_line_length": 34.4455451965332, "blob_id": "974dd51a523f4a121d49f7a6dd3c0a4fe945a051", "content_id": "dfcbba19107238294b1217cfcf4f7bb1fb5b2b2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3579, "license_type": "no_license", "max_line_length": 119, "num_lines": 101, "path": "/civil_violence/sobol_mp.py", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "import time\nimport pandas as pd\nimport numpy as np\nfrom SALib.sample import saltelli\nfrom tqdm import tqdm\nfrom multiprocessing import Pool, cpu_count\nfrom mesa.batchrunner import BatchRunner\nfrom civil_violence_model import CivilViolenceModel\nfrom utils import *\n\n\ndef sobol_analysis_no_network(problem):\n replicates = 4\n max_steps = 150\n distinct_samples = 100\n\n model_reporters = {\"QUIESCENT\": compute_quiescent,\n \"ACTIVE\": compute_active,\n \"JAILED\": compute_jailed,\n \"OUTBREAKS\": compute_outbreaks,\n \"LEGITIMACY\": compute_legitimacy}\n\n # We get all our samples here\n param_values = saltelli.sample(problem, distinct_samples)\n data = pd.DataFrame(index=range(replicates*len(param_values)),\n columns=['active_threshold_t', 'initial_legitimacy_l0', 'max_jail_term'])\n\n data['Run'], data['QUIESCENT'], data['ACTIVE'], data['JAILED'], data['OUTBREAKS'], data['LEGITIMACY'] = \\\n None, None, None, None, None, None\n\n column_order = ['Run', 'QUIESCENT', 'ACTIVE', 'JAILED', 'OUTBREAKS', 'LEGITIMACY']\n available_processors = cpu_count()\n print(\"Sobol MP will use {} processors.\".format(available_processors))\n pool = Pool(available_processors)\n\n run_iter_args = enumerate([[max_steps, model_reporters, list(v)] for _ in range(replicates) for v in param_values])\n print(\"Number steps is {}. 
Starting ...\".format(len(param_values) * replicates))\n\n with tqdm((len(param_values) * (replicates)), disable=False) as pbar:\n for count, vals, iteration_data in pool.imap_unordered(_mp_function, run_iter_args):\n data.iloc[count, 0:3] = vals\n data.loc[count, column_order] = iteration_data.loc[0, column_order]\n print(f'{count / (len(param_values) * (replicates)) * 100:.2f}% done')\n\n if count % 200 == 0:\n path_tmp = 'archives/progress_data_sobol_{0}.npy'.format(int(time.time()))\n with open(path_tmp, 'ab') as f:\n np.save(f, data)\n print(\"Progress saved in the file {:s}\".format(path_tmp))\n\n pbar.update()\n\n # Close multi-processing\n pool.close()\n\n path = 'archives/saved_data_sobol_{0}.npy'.format(int(time.time()))\n with open(path, 'ab') as f:\n np.save(f, data)\n print(\"Results saved in file {:s}\".format(path))\n\n return data\n\n\ndef _mp_function(input):\n count, iter_args = input\n\n max_steps = iter_args[0]\n model_reporters = iter_args[1]\n vals = iter_args[2]\n\n vals[2] = int(vals[2])\n\n variable_parameters = {}\n names = ['active_threshold_t', 'initial_legitimacy_l0', 'max_jail_term']\n for name, val in zip(names, vals):\n variable_parameters[name] = val # dictionary\n\n batch = BatchRunner(CivilViolenceModel,\n max_steps=max_steps,\n variable_parameters={name: [] for name in names},\n model_reporters=model_reporters)\n\n batch.run_iteration(variable_parameters, tuple(vals), count)\n iteration_data = batch.get_model_vars_dataframe()\n iteration_data['Run'] = count # Don't know what causes this, but iteration number is not correctly filled\n\n return count, vals, iteration_data\n\n\ndef sobol_main():\n problem = {\n 'num_vars': 3,\n 'names': ['active_threshold_t', 'initial_legitimacy_l0', 'max_jail_term'],\n 'bounds': [[0.01, 1], [0.01, 1], [1, 100]]\n }\n\n sobol_analysis_no_network(problem)\n\n\nif __name__ == '__main__':\n sobol_main()" }, { "alpha_fraction": 0.5969125032424927, "alphanum_fraction": 0.6125949621200562, "avg_line_length": 28.781021118164062, "blob_id": "7b6b4fed130d99115c68a11c7248eefa85985339", "content_id": "63ea5a0e764395094e7069aa83a470c2fa23e5f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4081, "license_type": "no_license", "max_line_length": 108, "num_lines": 137, "path": "/civil_violence/figure.py", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport networkx as nx\n\n\ndef create_fig(data, draw=False):\n\n if draw is not False:\n df = pd.DataFrame(data)\n df.columns = ['Node', 'Edges']\n \n fig = sns.displot(df['Edges'], bins=max(df['Edges'])+1, kde=True)\n\n plt.title('Amount of edges per node')\n plt.show()\n\n else:\n pass\n\ndef run_analysis(df, draw=False):\n\n df.reset_index(inplace=True)\n print(df)\n df = df.drop(df[df['State'] == 'State.COP'].index)\n \n fig, axs = plt.subplots(3,2)\n ax = axs.flatten()\n\n # Draw grievance figure\n df_griev = df[['Step', 'Grievance']].groupby('Step').mean()\n df_griev.plot(ax=ax[0])\n\n # Draw hardship\n df_hard = df[['Step', 'Hardship']].groupby('Step').mean()\n df_hard.plot(ax=ax[1])\n\n # Draw State\n df_state = pd.pivot_table(df, values='Legitimacy', index='Step', columns=['State'], aggfunc='count')\n df_state = df_state.fillna(0)\n df_state.plot(ax=ax[2])\n print(df_state)\n\n # Draw legitimicy\n df_leg = df[['Step', 'Legitimacy']].groupby('Step').mean()\n df_leg.plot(ax=ax[3])\n \n # Draw Influencer\n df_infl = 
pd.pivot_table(df, values='Legitimacy', index='Step', columns=['Influencer'], aggfunc='count')\n    df_infl.plot(ax=ax[4])\n\n    # Draw connections\n    df['N_connections'] = [len(_) for _ in df['N_connections']]\n    df_conn = df.loc[df['Step'] == 1] \n    sns.distplot(df['N_connections'], kde=True, ax=ax[5])\n\n    plt.savefig('mypng.png')\n\n\ndef draw_multiple_graphs(num_nodes=100, p=0.2, p_ws=0.1, seed=None):\n\n    # Erdos-Renyi\n    erdos = nx.generators.random_graphs.erdos_renyi_graph(num_nodes, p)\n    erdos_small = nx.generators.random_graphs.erdos_renyi_graph(int(num_nodes/5), p)\n    \n    # Watts-Strogatz\n    k = int((num_nodes-1)*p)\n    watts = nx.generators.random_graphs.watts_strogatz_graph(num_nodes, k, p)\n\n    k_small = int(((num_nodes/5)-1)*p)\n    watts_small = nx.generators.random_graphs.watts_strogatz_graph(int(num_nodes/5), k_small, p)\n\n    # Barabasi-Albert\n    m = int((p*num_nodes-1)/2)\n    barabasi = nx.generators.random_graphs.barabasi_albert_graph(num_nodes, m)\n\n    m_small = int((p*(num_nodes/5)-1)/2)\n    barabasi_small = nx.generators.random_graphs.barabasi_albert_graph(int(num_nodes/5), m_small)\n\n    fig, axs = plt.subplots(3, 3)\n    ax = axs.flatten()\n\n    # draw erdos\n    nx.draw_circular(erdos_small, node_size=10, font_size=5, ax=ax[0])\n    # nx.draw_circular(erdos, node_size=10, ax=ax[3])\n    df = pd.DataFrame(list(nx.clustering(erdos).items()))\n    df.columns = ['Node', 'Clustering']\n    sns.distplot(df['Clustering'], kde=True, ax=ax[3])\n\n    df = pd.DataFrame(erdos.degree())\n    df.columns = ['Node', 'Edges']\n    sns.distplot(df['Edges'], kde=True, ax=ax[6])\n\n    # draw watts\n    nx.draw_circular(watts_small, node_size=10, ax=ax[1])\n\n    df = pd.DataFrame(list(nx.clustering(watts).items()))\n    df.columns = ['Node', 'Clustering']\n    sns.distplot(df['Clustering'], kde=True, ax=ax[4])\n\n    df = pd.DataFrame(watts.degree())\n    df.columns = ['Node', 'Edges']\n    sns.distplot(df['Edges'], kde=True, ax=ax[7])\n\n    # draw barabasi\n    nx.draw_circular(barabasi_small, node_size=10, ax=ax[2])\n\n    df = pd.DataFrame(list(nx.clustering(barabasi).items()))\n    df.columns = ['Node', 'Clustering']\n    sns.distplot(df['Clustering'], kde=True, ax=ax[5])\n\n    df = pd.DataFrame(barabasi.degree())\n    df.columns = ['Node', 'Edges']\n    sns.distplot(df['Edges'], kde=True, ax=ax[8])\n    \n    # create_fig(erdos.edges, draw=True)\n    # Draw the graphs\n    # nx.draw(graph)\n    for _ in ax:\n        _.set_ylabel('')\n        _.set_xlabel('')\n\n    ax[0].set_title('Erdos-Renyi', size=10)\n    ax[1].set_title('Watts-Strogatz', size=10)\n    ax[2].set_title('Barabasi-Albert', size=10)\n\n    ax[0].set_ylabel('Network')\n    ax[3].set_ylabel('Fraction of cluster coefficient')\n    ax[6].set_ylabel('Fraction')\n\n    ax[4].set_xlabel('Cluster Coefficient')\n    ax[7].set_xlabel('Degree distribution')\n    plt.show()\n\n\nif __name__ == '__main__':\n    draw_multiple_graphs()\n\n" }, { "alpha_fraction": 0.7610437273979187, "alphanum_fraction": 0.7701991200447083, "avg_line_length": 47.46739196777344, "blob_id": "92115909dc083ec67ae09afc831854bd7bb442f2", "content_id": "3ef81d468b8d2f6fe44412b9f7eea1c7bc79291a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4369, "license_type": "no_license", "max_line_length": 131, "num_lines": 92, "path": "/civil_violence/README.md", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "# Civil violence with social network model\n\n## How to run a demonstration\n\n### Packages required\n\n```\nmesa = \"~=0.8.8\"\nnetworkx = \"~2.5\"\npandas = \"~=1.1\"\nnumpy = \"~=1.19\"\nmatplotlib = 
\"~=3.3\"\nseaborn = \"~=0.11\"\njupyter = \"~=1.0\"\nipynb = \"~=0.5\"\nsalib = \"~=1.3\"\n```\n\n### Ready-to-use\n\nA ready-to-use demonstration is available, in command line, by running the command:\n```\npython3 server.py\n```\n\nIt will open an example of civil violence with social network ABM in web inteface, using the default configuration file.\n\n\n### Personalized\n\nThe configuration files (./configurations/*.json) enforce fixed parameters which can't be updated in the web interface by the user.\nTherefore, if the user wan't to have full control of the ABM, he should update the default configuration file \n(./configurations/default.json) and remove the fixed parameters he want to manipulate.\n\n## Screenshots\n\n<p float=\"left\">\n <img src=\"../report/pictures/demonstration/spread_1.png\" width=\"300px\" alt=\"beginning\"/>\n <img src=\"../report/pictures/demonstration/spread_2.png\" width=\"300px\" alt=\"spread\"/>\n <img src=\"../report/pictures/demonstration/spread_3.png\" width=\"300px\" alt=\"full\"/>\n</p>\n\n## Architecture of the project\n\n### ABM model\n- server.py :Mesa server, set-up the ABM. Define the user-controlled parameters, interactive figures in the web \ninterface and configuration used.\n- civil_violence_model.py: Implementation of civil violence with social networks model. Define the attributes used by \nthe model, the schedule (agent steps + update of attributes), methods to control agents on the model (add, remove, etc) \nand data collection methods for the model and agents during the simulation.\n- civil_violence_agents.py: Implementation of agents used by civil violence model. This file contains definition of \nstates attributes and actions of the civilian/influencer and cop agents.\n- graph_utils.py: Implementation of social networks. Define different graph type, add model agents to graph, \nprint method, etc.\n- utils.py: Various function utilities used in the code base: read archived data, count, stats, \ncolor code converter, etc.\n- graphics_portrayal.py: Define portrayal of agent, networks, etc. which will be visualized in the web interface.\n- constant_variables.py: Constants shared by multiple algorithms in the codebase: shape, color, types, etc.\n- configurations/: Configuration files used to fix parameters (user-controlled parameters set in this file can't \nbe change by the user)\n\n\n### Analysis & Experiments\n\n- batchrunner_mp.py: This local class overwrite BatchRunnerMP class provided by mesa. \nIt resolves the bug making not possible to use \"run_all\" method for sensitivity analysis.\n- ofat_mp.py: One-factor-at-a-time (OFAT) sensitivity analysis of civil violence model with network (no bias). Work \nwith multiprocessing.\n- ofat_plot.py: Function to load ofat archived data and plot the analysis results\n- ofat_post_processing.py: Additionnal processing of the ofat data to get statistics on outbreaks \n(peak height, duration, frequency, etc.) \n- sobol_mp.py: Sobol sensitivity analysis of civil violence model with network (no bias). Implemented to \nhandle multiprocessing.\n- sobol_plot.py: Function to load sobol archived data and plot the analysis results\n- experiment_1.py: Generates data which are used for comparison of network topology influence on civil violence model.\n- figure.py: Analysis of Erdos Renyi, Watts Strogatz and Barabasi alber graph topologies. 
Studies the cluster coefficient \nand degree distribution.\n- jupyter_notebook/statistical_analysis: Statistical analysis of ABM data.\n- archives/: Data from sensitivity analysis are archived in this directory.\n- output/: Results from experiments are archived in this directory.\n\n## Earlier work\n\nSome earlier work reviewed before working on this implementation involves: \n- civil violence with propaganda agent model: https://github.com/fabero/Civil-Violence-Modelling-A05\n- original epstein model: https://github.com/projectmesa/mesa-examples/tree/master/examples/EpsteinCivilViolence\n\nWe highlight that the color converter from the fabero repository was re-used untouched in our project (see utility file)\nto get gradient colors for our grievance canvas element.\n\nSuggestions from https://github.com/projectmesa/mesa/issues/787 also helped us to fix the BatchRunnerMP mesa class in\nour project.\n\n\n" }, { "alpha_fraction": 0.6539537310600281, "alphanum_fraction": 0.6615815162658691, "avg_line_length": 27.507246017456055, "blob_id": "9041c86c4d876043ac3156da49d18535fb091163", "content_id": "c72e464a6766782bd43b2745c3a105047178fd2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3933, "license_type": "no_license", "max_line_length": 98, "num_lines": 138, "path": "/civil_violence/utils.py", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "import json\n\n\ndef get_average_number_neighbours(density, vision):\n    \"\"\"\n    Determine average number of neighbours\n    :param density: population density\n    :param vision: vision in 4 directions N/S/E/W\n    :return: expected number of neighbours: 4 * number of cells per direction * population density\n    \"\"\"\n    return 4 * vision * density\n\n\ndef read_configuration(filepath='./configurations/default.json'):\n    \"\"\"\n    Read configuration of the model stored in json files.\n    JSON lets us store values of different types (string, int, float, booleans, etc.).\n    :param filepath: Relative path to the configuration file\n    :return: a dictionary of configuration values\n    \"\"\"\n    with open(filepath, 'r') as f:\n        configuration = json.load(f)\n\n    return configuration\n\n\ndef hex_to_rgb(hex):\n    \"\"\"\n    Convert hexadecimal to RGB\n    Original code from https://github.com/fabero/Civil-Violence-Modelling-A05\n    \"\"\"\n    # Pass 16 to the integer function for change of base\n    return [int(hex[i:i+2], 16) for i in range(1,6,2)]\n\n\ndef rgb_to_hex(RGB):\n    \"\"\"\n    Convert RGB to hexadecimal\n    Original code from https://github.com/fabero/Civil-Violence-Modelling-A05\n    \"\"\"\n    # Components need to be integers for hex to make sense\n    RGB = [int(x) for x in RGB]\n    return \"#\"+\"\".join([\"0{0:x}\".format(v) if v < 16 else\n            \"{0:x}\".format(v) for v in RGB])\n\n\ndef color_dict(gradient):\n    \"\"\"\n    Takes in a list of RGB sub-lists and returns dictionary of\n    colors in RGB and hex form for use in a graphing function\n    defined later on.\n    Original code from https://github.com/fabero/Civil-Violence-Modelling-A05\n    \"\"\"\n    return {\"hex\": [rgb_to_hex(RGB).upper() for RGB in gradient]}\n\n\ndef linear_gradient(start_hex, finish_hex=\"#FFFFFF\", n=10):\n    \"\"\"\n    Returns a gradient list of (n) colors between\n    two hex colors. 
start_hex and finish_hex\n    should be the full six-digit color string,\n    including the number sign (\"#FFFFFF\").\n    Original code from https://github.com/fabero/Civil-Violence-Modelling-A05\n    \"\"\"\n    # Starting and ending colors in RGB form\n    s = hex_to_rgb(start_hex)\n    f = hex_to_rgb(finish_hex)\n\n    # Initialize a list of the output colors with the starting color\n    RGB_list = [s]\n\n    # Calculate a color at each evenly spaced value of t from 1 to n\n    for t in range(1, n):\n        # Interpolate RGB vector for color at the current value of t\n        curr_vector = [\n            int(s[j] + (float(t)/(n-1))*(f[j]-s[j]))\n            for j in range(3)\n        ]\n        # Add it to our list of output colors\n        RGB_list.append(curr_vector)\n\n    return color_dict(RGB_list)\n\n\ndef compute_quiescent(model):\n    \"\"\"\n    Return number of quiescent agent\n    :param model : civil violence model class instance\n    \"\"\"\n    return model.count_type_citizens(\"QUIESCENT\")\n\n\ndef compute_active(model):\n    \"\"\"\n    Return number of active agent\n    :param model : civil violence model class instance\n    \"\"\"\n    return model.count_type_citizens(\"ACTIVE\")\n\n\ndef compute_jailed(model):\n    \"\"\"\n    Return number of jailed agent\n    :param model : civil violence model class instance\n    \"\"\"\n    return model.count_type_citizens(\"JAILED\")\n\n\ndef compute_legitimacy(model):\n    \"\"\"\n    Return central authority current legitimacy\n    :param model : civil violence model class instance\n    \"\"\"\n    return model.legitimacy\n\n\ndef compute_influencers(model):\n    \"\"\"\n    Return list of influencers\n    :param model : civil violence model class instance\n    \"\"\"\n    return len(model.influencer_list)\n\n\ndef compute_outbreaks(model):\n    \"\"\"\n    Return number of outbreaks until the current step of the simulation\n    :param model : civil violence model class instance\n    \"\"\"\n    return model.outbreaks\n\n\ndef compute_datacollector(model):\n    \"\"\"\n    Return data collector\n    :param model : civil violence model class instance\n    \"\"\"\n    return model.datacollector" }, { "alpha_fraction": 0.593069314956665, "alphanum_fraction": 0.6009901165962219, "avg_line_length": 38.60784149169922, "blob_id": "fa09aeaf6b2e760f8cab0fb7566a35096e846c1e", "content_id": "6de9d171edc72f96fc5f6f8db85827cebf62ce7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2020, "license_type": "no_license", "max_line_length": 115, "num_lines": 51, "path": "/civil_violence/experiment_1.py", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "import time\nimport numpy as np\nfrom mesa.batchrunner import BatchRunner\nfrom civil_violence_model import CivilViolenceModel\nfrom utils import read_configuration\n\n\ndef experiment_1(replicates=40, max_steps=200, graph_type=\"None\"):\n    \"\"\"\n    Experiment 1 - Run simulations of civil violence with network model.\n    Function to generate data which are used for comparison of network topology influence on civil violence model.\n    \"\"\"\n    path = 'archives/saved_data_experiment_1_{0}_{1}'.format(int(time.time()), graph_type)\n\n    configuration = read_configuration()\n    model_params = {}\n    model_params.update(configuration)  # Overwritten user parameters don't appear in the graphic interface\n    model_params.update({'seed': None})\n    model_params['graph_type'] = graph_type\n    model_params['max_iter'] = max_steps\n\n    batch = BatchRunner(CivilViolenceModel,\n                        max_steps=max_steps,\n                        iterations=replicates,\n                        fixed_parameters=model_params,\n                        model_reporters={'All_Data': lambda m: m.datacollector,\n                                         
\"QUIESCENT\": lambda m: m.count_type_citizens(\"QUIESCENT\"),\n \"ACTIVE\": lambda m: m.count_type_citizens(\"ACTIVE\"),\n \"JAILED\": lambda m: m.count_type_citizens(\"JAILED\"),\n \"OUTBREAKS\": lambda m: m.outbreaks}, # attempt all\n display_progress=True)\n\n batch.run_all()\n\n batch_df = batch.get_model_vars_dataframe()\n batch_df = batch_df.drop('All_Data', axis=1)\n\n data = batch_df\n run_data = batch.get_collector_model()\n\n with open(path, 'ab') as f:\n np.save(f, data)\n\n run_path = path+'_run'\n with open(run_path, 'ab') as f:\n np.save(f, run_data)\n\n\nif __name__ == '__main__':\n # Graph_type to be changed to compare influence of each networks\n experiment_1(replicates=2, max_steps=200, graph_type=\"None\")\n" }, { "alpha_fraction": 0.7651821970939636, "alphanum_fraction": 0.7651821970939636, "avg_line_length": 14.4375, "blob_id": "a953eae2b1894b020f59b18ce09acc0419c90d32", "content_id": "682a3f02c4d98dad2612fea790a8ffe961641b6e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 247, "license_type": "no_license", "max_line_length": 54, "num_lines": 16, "path": "/report/README.md", "repo_name": "vdesgrange/agent_based_modelling", "src_encoding": "UTF-8", "text": "# How to compile latex\n\nRun the following command to compile the report:\n\n```\npdflatex report.tex\nbibtex report\npdflatex report.tex\npdflatex report.tex\n```\n\nRun the following command to compile the presentation:\n\n```\npdflatex presentation.tex\n```\n" } ]
18
liibooyaa/hw4-git-practice
https://github.com/liibooyaa/hw4-git-practice
0026224ad54104377519b37e9aaeb4eaf67e35bd
fd3a0a38334ea486c5195317afb8d0e951c982e2
c8482b7ea98db16955a65c0f5e83c55a635ff3d5
refs/heads/master
2021-01-02T13:42:53.329921
2020-02-11T01:15:48
2020-02-11T01:15:48
239648010
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 21.5, "blob_id": "35e7c72610a9277e3a1a86ca816ccdb74caf86b3", "content_id": "d949b7d548e1388d49ae006936c527d81e4249c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44, "license_type": "no_license", "max_line_length": 23, "num_lines": 2, "path": "/hw4_git.py", "repo_name": "liibooyaa/hw4-git-practice", "src_encoding": "UTF-8", "text": "print(\"Hello World\")\n#I am adding something." } ]
1
tristanfisher-archive/iobot
https://github.com/tristanfisher-archive/iobot
666d6b38147e182b9c6622f3876f43c7b55b08a3
a54516abc6182be94195b493a18fc7952f494a86
4e130769a524be8476a1f1b9bba73e019d7e311e
refs/heads/master
2021-01-10T19:56:35.487849
2014-08-11T22:26:58
2014-08-11T22:26:58
37346737
1
0
null
2015-06-12T22:03:58
2014-08-03T04:42:56
2014-08-11T22:27:00
null
[ { "alpha_fraction": 0.6571428775787354, "alphanum_fraction": 0.6571428775787354, "avg_line_length": 19.14285659790039, "blob_id": "983947e0f847ed6b140c17d214f38028324d0b0b", "content_id": "b9355b734c648946a702a0b91a274f441b860049", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "permissive", "max_line_length": 46, "num_lines": 7, "path": "/main.py", "repo_name": "tristanfisher-archive/iobot", "src_encoding": "UTF-8", "text": "from plugin_zulip import IOBotZulip\n\n\nif __name__ == '__main__':\n\n test_bot = IOBotZulip()\n test_bot.callback(callback_type='message')" }, { "alpha_fraction": 0.7878788113594055, "alphanum_fraction": 0.8030303120613098, "avg_line_length": 32.5, "blob_id": "e586bcebd92b05b89edde869fe814f0408f3e3c5", "content_id": "288e9dadea6ef7bf2b77a3f085c4db77e2bb470e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 66, "license_type": "permissive", "max_line_length": 55, "num_lines": 2, "path": "/zulip_requirements.txt", "repo_name": "tristanfisher-archive/iobot", "src_encoding": "UTF-8", "text": "simplejson\ngit+git://github.com/zulip/python-zulip.git@2ddd1127506c4e4c91954b8883c592c118552203" }, { "alpha_fraction": 0.6116768717765808, "alphanum_fraction": 0.6116768717765808, "avg_line_length": 25.321428298950195, "blob_id": "f99ea2c7571d486222a8a1fcc036302aede246d1", "content_id": "e9899fce461a8986abb3b889abfbe80c3502d4c4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1473, "license_type": "permissive", "max_line_length": 94, "num_lines": 56, "path": "/iobot.py", "repo_name": "tristanfisher-archive/iobot", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport sys\n\nclass IOBot(object):\n '''\n The primary class for IOBot. Plugins should be bound to this object.\n '''\n\n def __init__(self, handlers=None, name='IOBot'):\n self.name = name\n self._handlers = []\n self.handlers = handlers\n\n #\n # Handler functionality\n #\n\n #Duplicate here for now.. eventually stitch together call/get\n def call_handler(self, handler, *args, **kwargs):\n if handler in self.handlers:\n handler(args, kwargs)\n\n @property\n def handlers(self):\n return self._handlers\n\n def add_handler(self, handler):\n if (handler not in self._handlers) and (handler is not None):\n self._handlers.append(handler)\n\n @handlers.setter\n def handlers(self, handler):\n self.add_handler(handler)\n\n @handlers.deleter\n def handlers(self):\n print(\"Cowardly refusing to delete all handlers. \"\n \"Perhaps you meant to call .remove_handler('handler')?\", file=sys.stderr)\n\n def remove_handler(self, handler):\n if handler in self._handlers:\n self._handlers.remove(handler)\n\n #\n # Help and User Environment\n #\n\n @classmethod\n def help(cls, self):\n return \"IOBot is a pluggable bot. 
You have reached the help method of \" \\\n \"the IOBot base class, which doesn't really live up to its name for end users.\"\n\nif __name__ == '__main__':\n\n _bot = IOBot()" }, { "alpha_fraction": 0.5496845245361328, "alphanum_fraction": 0.5509238243103027, "avg_line_length": 37.09871292114258, "blob_id": "9c1230297a4e04bcb3dc35cb2f0b6bd54d55cf49", "content_id": "50aa390f70be095ce8bf046a343d1fb5ac0ff105", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8876, "license_type": "permissive", "max_line_length": 136, "num_lines": 233, "path": "/plugin_zulip.py", "repo_name": "tristanfisher-archive/iobot", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nfrom iobot import IOBot\n\nimport zulip\nimport os\nimport shlex\nimport sys\nfrom random import choice\n\n\nclass IOBotZulip(object):\n\n def __init__(self, bot_name='iobot', bot_email=None, bot_api_key=None, debug=False):\n\n self.debug = debug\n self.bot_name = bot_name\n self.bot_email = str(bot_email)\n self.bot_api_key = bot_api_key\n\n #Do this in 2 passes so we don't raise an exception when the attribute was set,\n #but the env key doesn't exist.\n if self.bot_email is None:\n try:\n self.bot_email = os.environ['ZULIP_BOT_EMAIL']\n except:\n raise Exception(\"Zulip bot email address was not provided and could \"\n \"not be populated via the environmental variable: \\nZULIP_BOT_EMAIL\")\n\n if self.bot_api_key is None:\n try:\n self.bot_api_key = os.environ['ZULIP_API_KEY']\n except:\n raise Exception(\"Zulip bot api key address was not provided and could \"\n \"not be populated via the environmental variable: \\nZULIP_API_KEY\")\n\n self._client = zulip.Client(email=self.bot_email, api_key=self.bot_api_key)\n\n # See if the action exists in the subclass before going to parent scope.\n self.bot_actions = ['help']\n self.greetings = ['hi', 'hey', 'hello', 'yo', 'sup', 'greetings', 'omg hi', 'hiya'] # 'omg hi' is a miss on shlex\n\n def user_facing(self, func):\n def register():\n if func not in self.bot_actions:\n self.bot_actions.append(func)\n register()\n\n return func\n\n # -- END OF INIT --\n\n #\n # Helper methods\n #\n #shim for more sophisticated logging later.\n @staticmethod\n def debug_msg(prefix='[DEBUG]>>> ', *args, **kwargs):\n try:\n print(\"%s %s\" % (prefix, args), file=sys.stdout)\n for x, y in kwargs.iteritems():\n print(\"%s %s : %s\" % (prefix, x, y), file=sys.stdout)\n except TypeError, e: # don't break on operand issues\n print(\">>>> Caught error on debug_msg: \" + str(e), file=sys.stderr)\n\n def set_return_key(self, obj, key, default='iobot'):\n if obj.get(key) is None:\n obj.update(key=default)\n\n return obj.get(key)\n\n #\n # User-facing bot actions\n #\n\n def help(self, shlexed_string):\n help_content = '''\n help:\n available zulip actions:\n {bot_actions}\n '''.format(bot_actions=\"\\n \".join(self.bot_actions))\n\n return help_content\n\n def say_hi(self):\n return \"%s %s\" % (choice(self.greetings), ':)')\n\n def parameterized_response(self, msg=''):\n return \"%s\" % msg\n\n\n #\n # Bot action router:\n #\n def parse_handler(self, string, prefix_trigger=False):\n \"\"\"\n this handler does a list lookup by grabbing the first non-whitespace string that is passed to it.\n\n this function is a great candidate for abstracting into a separate module.\n\n prefix_trigger : only parse the content when the incoming content begins with the given string.\n this is useful for listening in a channel -- don't respond to 
every 'hi'\n \"\"\"\n #lex and lowercase first string\n try:\n shlexed_string = shlex.split(string)\n except ValueError, e: #>iobot '< will crash this\n sys.stderr.write(str(e) + \"\\n\")\n return None\n #To return complaints:\n #return self.parameterized_response(\";__; that input made me feel crash-ey.\")\n\n most_significant_string = str(shlexed_string[:1][0]).lower()\n\n if prefix_trigger: # is it me you're looking for?\n if most_significant_string == prefix_trigger:\n #TODO!\n shlexed_string.remove(prefix_trigger)\n most_significant_string = shlexed_string[0]\n #shlexed_string.pop(0)\n\n else:\n return None # If we expect to get called and we don't, do nothing.\n\n try:\n if (str(shlexed_string[0]).lower() in self.greetings) and (len(shlexed_string) > 0):\n if prefix_trigger: # iobot was popped off shlex (e.g. '->iobot<- hi')\n return self.say_hi()\n if len(shlexed_string) > 1: # hi with more text. #todo: remove?\n if str(shlexed_string[1]).lower() == self.bot_name: # someone is saying hi to us by name!\n return self.say_hi()\n else:\n return self.say_hi()\n except IndexError, e:\n sys.stderr.write('IndexError!' + str(e))\n pass # user string was one string\n\n if most_significant_string in self.bot_actions:\n #If first string is in bot actions, call the method and pass it the rest of the string.\n #Leaving the method with dealing with whether or not it wants to shlex the first char.\n _func = most_significant_string\n\n try: # we shouldn't get here, but don't crash\n if prefix_trigger:\n return getattr(self, _func)(shlexed_string=shlexed_string[1:])\n else:\n return getattr(self, _func)(shlexed_string=shlexed_string[0:])\n except AttributeError:\n return \"Command unknown. Available actions: %s\" % (\", \".join(self.bot_actions))\n else:\n #if we don't have a matching bot action, return a list of actions\n return \"Command unknown. 
Available actions: %s\" % (\", \".join(self.bot_actions))\n\n #\n # Response mechanisms\n #\n\n def respond_private(self, message, _sender, response):\n # Keep from responding to ourselves\n\n if (_sender != self.bot_email) and (_sender is not None):\n if self.debug:\n IOBotZulip.debug_msg('Sending a private message...')\n self._client.send_message({\n \"type\": \"private\",\n \"subject\": message['subject'],\n \"to\": _sender,\n \"content\": response\n })\n\n def respond_stream(self, message, response):\n if self.debug:\n IOBotZulip.debug_msg(\"Sending message to stream: %s , subject: %s\" %\n (message.get('display_recipient', 'iobot'), self.set_return_key(obj=message, key='subject')))\n self._client.send_message({\n \"type\": \"stream\",\n \"to\": message.get('display_recipient', 'iobot'),\n \"subject\": self.set_return_key(obj=message, key='subject'),\n \"content\": response\n })\n\n\n def respond(self, message=None, channel=None, content=None):\n\n if self.debug:\n debug_output = {\n 'message': message,\n 'channel': channel,\n 'content': content\n }\n IOBotZulip.debug_msg('send_message() debug: ', debug_output)\n\n #suuuuper cheap version of types.NoneType\n if id(message) is not id(None):\n\n _sender = message['sender_email']\n\n #self.bot_email is a str, cast unicode str(_sender) and check that _sender isn't None.\n if (str(_sender) != self.bot_email) and (_sender is not None):\n\n m_type = message.get('type', None)\n\n if m_type == 'stream' or m_type == 'channel':\n response = self.parse_handler(message.get('content', ''), prefix_trigger=self.bot_name)\n if response:\n if self.debug: IOBotZulip.debug_msg('Sending stream response to: %s' % _sender)\n self.respond_stream(message, response)\n elif m_type == 'private':\n response = self.parse_handler(message.get('content', ''), prefix_trigger=False)\n if response:\n if self.debug: IOBotZulip.debug_msg('Sending private response to: %s' % _sender)\n self.respond_private(message, _sender, response)\n else:\n pass\n return\n\n else:\n if self.debug:\n IOBotZulip.debug_msg(\"send_message()-> Sender email was >> %s <<. Refusing to go into a recursive loop.\" % _sender)\n\n def callback(self, callback_type='message'):\n \"\"\"\n Handling the **blocking** call\n \"\"\"\n\n action = self.respond\n\n if callback_type == 'event':\n sys.stderr.write(\"callback()->callback_type: event handler not yet implemented!\")\n self._client.call_on_each_event(lambda message: sys.stdout.write(str(message) + \"\\n\"))\n else:\n self._client.call_on_each_message(lambda msg: action(msg))\n\n bind = callback" }, { "alpha_fraction": 0.6956097483634949, "alphanum_fraction": 0.6995121836662292, "avg_line_length": 26, "blob_id": "c68125046712277550dda83f135551d60ee7c86c", "content_id": "da24a5bd726fc6c86ea63e82e95bded1b25627c3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1025, "license_type": "permissive", "max_line_length": 191, "num_lines": 38, "path": "/README.md", "repo_name": "tristanfisher-archive/iobot", "src_encoding": "UTF-8", "text": "#IO Bot\n======\n\nIO Bot is _YAZBWAHS_ (Yet Another [Zulip](https://zulip.com) Bot Written At [Hacker School](http://hackerschool.com)).\n\n###Features:\n\nIO Bot will-be/is a collection of [objects](https://docs.python.org/2/tutorial/classes.html). 
-- feel free to simply use it for its [modules](https://docs.python.org/2/tutorial/modules.html).\n\n###How to use:\n\nTo connect IO Bot to Zulip, write a simple Python script:\n\n\tfrom plugin_zulip import IOBotZulip\n\n\tif __name__ == '__main__':\n \ttest_bot = IOBotZulip(bot_email='[email protected]', bot_api_key='zulip_api_key')\n \ttest_bot.callback(callback_type='message')\n \t\n###Plugins:\n\n#####Zulip:\n\nYou can pass the bot's email address and API key via environmental variables:\n\n- ZULIP_BOT_EMAIL\n- ZULIP_API_KEY\n\n\n####Python Version:\n\n IoBot is written for Python 2.7 due to some underlying dependencies. \n\n####To Do:\n\n- Add pluggable handlers. This will work via the following syntax: `test_bot.add_handler('stackoverflow')`.\n- Add tests.\n- Separate logging into a separate module." }, { "alpha_fraction": 0.6320474743843079, "alphanum_fraction": 0.639465868473053, "avg_line_length": 26, "blob_id": "7f924dd89aa5a3437dad6f83ad2b5e7dd66e66e0", "content_id": "6e4dae25e5e70e27bd5bc4ad40ee4c0bde0bd0c0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 674, "license_type": "permissive", "max_line_length": 61, "num_lines": 25, "path": "/plugin_video.py", "repo_name": "tristanfisher-archive/iobot", "src_encoding": "UTF-8", "text": "from iobot import IOBot\n\npreset_videos = {\n \"pikachu\": \"https://www.youtube.com/watch?v=7aVcisdS8X0\",\n \"boris\": \"https://www.youtube.com/watch?v=b18DjXWyWuc\"\n}\n\nclass IOBotVideo(IOBot):\n\n def __init__(self, preset_videos=preset_videos):\n super(IOBotVideo, self).__init__()\n self._preset_videos = {}\n self.preset_videos = preset_videos\n\n @property\n def preset_videos(self):\n return self._preset_videos\n\n @preset_videos.setter\n def preset_videos(self, dict_entry):\n if isinstance(dict_entry, dict):\n self._preset_videos.update(dict_entry)\n\n def video(self, query):\n return self._preset_videos[query]" } ]
6
dipu2poudel/sudoku_solver
https://github.com/dipu2poudel/sudoku_solver
ba869270964f516ad2811ee0f4d8029710d729b9
e3484b5b6ce82cd5c8109e473e35c7a9fb9fbe50
2897d1cfda53196f9d6d8bfc9c2f853d7cdd66c3
refs/heads/master
2023-05-03T06:01:46.902478
2021-05-28T09:50:31
2021-05-28T09:50:31
371606627
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.355053186416626, "alphanum_fraction": 0.40026596188545227, "avg_line_length": 31.22857093811035, "blob_id": "ec3f9fc139d152678bbc7e3c1d253c84ef8b4151", "content_id": "0b116bda8ec95970cac662d14fdd436083923526", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2256, "license_type": "no_license", "max_line_length": 69, "num_lines": 70, "path": "/sudoku_solver.py", "repo_name": "dipu2poudel/sudoku_solver", "src_encoding": "UTF-8", "text": "class Sudoku_Board:\n def __init__(self):\n self.board = [\n [7, 8, 0, 4, 0, 0, 1, 2, 0],\n [6, 0, 0, 0, 7, 5, 0, 0, 9],\n [0, 0, 0, 6, 0, 1, 0, 7, 8],\n [0, 0, 7, 0, 4, 0, 2, 6, 0],\n [0, 0, 1, 0, 5, 0, 9, 3, 0],\n [9, 0, 4, 0, 6, 0, 0, 0, 5],\n [0, 7, 0, 3, 0, 0, 0, 1, 2],\n [1, 2, 0, 0, 0, 7, 4, 0, 0],\n [0, 4, 9, 2, 0, 6, 0, 0, 7]\n ]\n self.rows = len(self.board)\n self.columns = len(self.board[0])\n\n def print_board(self):\n board_GUI = ''\n z = 0\n for row in self.board:\n if z % 3 == 0 and not z == 0:\n board_GUI += '------------------------\\n'\n for i in range(self.rows):\n\n if i % 3 == 0 and not i == 0:\n board_GUI += ' | '\n board_GUI += str(row[i]) + ' '\n if i == self.columns - 1:\n board_GUI = f'{board_GUI}\\n'\n z += 1\n\n print(board_GUI)\n\n def empty_spots_finder(self):\n for i in range(self.rows):\n for j in range(self.columns):\n if self.board[i][j] == 0:\n return i, j\n\n def sudoku_solver(self):\n empty_spot = self.empty_spots_finder()\n if not empty_spot:\n return True\n ro, co = empty_spot\n for i in range(1, 10):\n if self.number_is_valid(i, ro, co):\n self.board[ro][co] = i\n\n if self.sudoku_solver():\n return True\n self.board[ro][co] = 0\n return False\n\n def number_is_valid(self, number, ro, co):\n for i in range(self.rows):\n if self.board[ro][i] == number and co != i:\n return False\n # check column\n if self.board[i][co] == number and ro != i:\n return False\n\n # check boxes\n box_horizontal = (ro // 3) * 3\n box_vertical = (co // 3) * 3\n for i in range(box_horizontal, box_horizontal + 3):\n for j in range(box_vertical, box_vertical + 3):\n if self.board[i][j] == number and (i, j) != (ro, co):\n return False\n\n return True\n" }, { "alpha_fraction": 0.7714285850524902, "alphanum_fraction": 0.7714285850524902, "avg_line_length": 20, "blob_id": "eccf4c9630a1f86ecc9c4d73b25aca3f08776c38", "content_id": "425e23fee520db3d44a71445d9ec8fc998fd720a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 38, "num_lines": 5, "path": "/main.py", "repo_name": "dipu2poudel/sudoku_solver", "src_encoding": "UTF-8", "text": "from sudoku_solver import Sudoku_Board\n\nboard = Sudoku_Board()\nboard.sudoku_solver()\nboard.print_board()\n" } ]
2
Ashard/FIT3162-Computer-Science-Project-2
https://github.com/Ashard/FIT3162-Computer-Science-Project-2
3ebd237771afc32215d5b6ff7e39460f38135cf6
bd72b85ca98cd6d605d09a8a0c29e64097c5b425
48905b1acb766778f1e0a5da85f2c04004a6408c
refs/heads/master
2020-04-10T15:48:46.191037
2019-02-07T12:41:55
2019-02-07T12:41:55
161123843
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5476190447807312, "alphanum_fraction": 0.5541125535964966, "avg_line_length": 29.766666412353516, "blob_id": "b03b407211eedb809b6fdfef194a783a22bf4a61", "content_id": "08c506fce5851796146615972b8893a07bb954b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 924, "license_type": "no_license", "max_line_length": 93, "num_lines": 30, "path": "/final_code_deliverables/line_to_tuple.py", "repo_name": "Ashard/FIT3162-Computer-Science-Project-2", "src_encoding": "UTF-8", "text": "import re\n# parameter: filename: the name of the file which contains the annotated aspects and sentence\n# return: a list of tuples, where each tuple is (aspects,sentence)\ndef line_to_tuple(filename):\n def extract(data):\n # trimming word after\"#\"\n data2 = []\n for i in data:\n data2.append(i[:i.find(\"[\")])\n return data2\n\n result = []\n\n file = open(filename, \"r\")\n for line in file:\n opinion_sentence = []\n if re.match(\"^[a-zA-Z]+.*\", line):\n line = line.strip(\"\\n\")\n line = line.split(\"##\")\n temp = line[0]\n temp = temp.split(\",\")\n extracted_aspects = temp\n opinion_sentence.append(line[1])\n\n cleaned_aspects = extract(extracted_aspects)\n\n if len(cleaned_aspects) > 0:\n result.append((cleaned_aspects, opinion_sentence))\n file.close()\n return result\n\n" }, { "alpha_fraction": 0.43095850944519043, "alphanum_fraction": 0.45795080065727234, "avg_line_length": 35.313331604003906, "blob_id": "fed423c58fcff54512fb5560fdfdb477b211d1bb", "content_id": "88fbe6ae4575e0126f6486124068cbfc85bf1ec0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5446, "license_type": "no_license", "max_line_length": 111, "num_lines": 150, "path": "/final_code_deliverables/explicit_aspect_extractor.py", "repo_name": "Ashard/FIT3162-Computer-Science-Project-2", "src_encoding": "UTF-8", "text": "import nltk\nfrom nltk import pos_tag, word_tokenize\nfrom nltk.corpus import stopwords\nimport string\nimport re\nfrom string import punctuation\nfrom xml.dom import minidom\nfrom functools import reduce\n\n\ndef pos(sentence):\n \"\"\"\n #Function: To tokenize the sentence as well as pos tag the words\n #Parameters: string in a list\n #Return: array of words with pos tag\n \"\"\"\n new_test = []\n new_test.append(sentence)\n tokenized_sents = [word_tokenize(i) for i in new_test]\n new_sentence = [nltk.pos_tag(sent) for sent in tokenized_sents]\n return new_sentence\n\n\ndef rules(words):\n \"\"\"\n #Function: Rule based approach for explicit aspects\n #Parameters: each sentence in an array\n #Return: for each sentence, aspects in an array\n \"\"\"\n features = []\n duplicates = []\n newlist = []\n\n for i in range(0, len(words)):\n if i == len(words) - 1 and (words[len(words) - 1][1] == \"JJ\" or words[len(words) - 1][1] == \"RB\"\n or words[len(words) - 1][1] == \"RBR\" or words[len(words) - 1][1] == \"RBS\"\n or words[len(words) - 1][1] == \"VBN\" or words[len(words) - 1][1] == \"VBD\"):\n break\n\n # JJ rule\n if (i + 1 < len(words) - 1):\n if ((words[i][1] == \"JJ\") and (words[i + 1][1] == \"NN\" or words[i + 1][1] == \"NNS\") and (\n words[i + 2][1] == \"NN\" or words[i + 2][1] == \"NNS\")):\n features.append([words[i + 1][0], words[i + 2][0]])\n else:\n # if (i+1 < len(words)-1):\n if ((words[i][1] == \"JJ\") and (words[i + 1][1] == \"NN\" or words[i + 1][1] == \"NNS\")):\n features.append([words[i + 1][0]])\n\n # DT rule\n if (i + 1 < len(words) - 1):\n if ((words[i][1] == 
\"DT\") and (\n words[i + 1][1] == \"NNS\" or words[i + 1][1] == \"NNP\" or words[i + 1][1] == \"NN\") and (\n words[i + 2][1] == \"NNS\" or words[i + 2][1] == \"NNP\" or words[i + 2][1] == \"NN\")):\n features.append([words[i + 1][0], words[i + 2][0]])\n else:\n # if (i+1 < len(words)-1):\n if ((words[i][1] == \"DT\") and (\n words[i + 1][1] == \"NNP\" or words[i + 1][1] == \"NNS\" or words[i + 1][1] == \"NN\")):\n features.append([words[i + 1][0]])\n\n # CC rule\n if (i + 1 < len(words) - 1):\n if ((words[i][1] == \"CC\") and (words[i + 1][1] == \"NNP\") and (words[i + 2][1] == \"NNP\")):\n features.append([words[i + 1][0], words[i + 2][0]])\n else:\n if ((words[i][1] == \"CC\") and (words[i + 1][1] == \"NN\")):\n features.append([words[i + 1][0]])\n\n # a\n if (i + 1 < len(words) - 1):\n if ((words[i][1] == \"PRP$\") and (words[i + 1][1] == \"NN\") and (words[i + 2][1] == \"NN\")):\n features.append([words[i + 1][0], words[i + 2][0]])\n\n # Starting word rule\n if (i == 0):\n if ((words[i][1] == \"NN\" or words[i][1] == \"NNS\")):\n features.append([words[i][0]])\n\n # NN rule\n if (i + 1 < len(words) - 1):\n if ((words[i][1] == \"NN\") and (words[i + 1][1] == \"NN\") and (words[i + 2][1] == \"NN\")):\n features.append([words[i][0], words[i + 1][0], words[i + 2][0]])\n else:\n if ((words[i][1] == \"NN\" or words[i][1] == \"NNP\") and (words[i + 1][1] == \"NN\")):\n duplicates.append([words[i][0], words[i + 1][0]])\n\n # IN rule\n if (i + 1 < len(words) - 1):\n if ((words[i][1] == \"IN\") and (words[i + 1][1] == \"NN\" or words[i + 1][1] == \"NNS\")):\n features.append([words[i + 1][0]])\n\n # RB rule\n if (i + 1 < len(words) - 1):\n if ((words[i][1] == \"RB\" or words[i][1] == \"RBR\" or words[i][1] == \"RBS\")\n and (words[i + 1][1] == \"RB\" or words[i + 1][1] == \"RBR\" or words[i + 1][1] == \"RBS\")\n and (words[i + 2][1] == \"NN\" or words[i + 2][1] == \"NNS\")):\n duplicates.append([words[i + 2][0]])\n\n for i in features:\n for j in duplicates:\n if j == i:\n duplicates.remove(i)\n features.extend(duplicates)\n for each in features:\n if each not in newlist:\n newlist.append(each)\n\n return newlist\n\n\ndef rule_noun_phrases(words):\n \"\"\"\n #Function: To combine matched words as one single aspect\n #Parameters: nested array\n #Return: combines tuples in nested array\n \"\"\"\n noun_phrases = []\n for i in words:\n extracted_features = rules(i)\n\n noun_phrases.append(extracted_features)\n\n nouns_phrases = []\n for word in noun_phrases:\n for i in word:\n if len(i) > 1:\n s = \" \"\n i = s.join(i)\n nouns_phrases.append([i])\n else:\n nouns_phrases.append(i)\n nouns_phrases = [val for sublist in nouns_phrases for val in sublist]\n return nouns_phrases\n\n\ndef remove_unnessecary_elements(aspect):\n for a in range(len(aspect) - 1):\n for b in range(a + 1, len(aspect)):\n if aspect[a] in aspect[b]:\n aspect[a] = \" \"\n elif aspect[b] in aspect[a]:\n aspect[b] = \" \"\n\n array = []\n for a in aspect:\n if a not in \" \":\n array.append(a)\n\n return array" }, { "alpha_fraction": 0.7394495606422424, "alphanum_fraction": 0.763302743434906, "avg_line_length": 59.55555725097656, "blob_id": "4fa46586ce434c9474002e656a821f48f9481407", "content_id": "ac4f595cbf049393376e591a5648df3f30cb04e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 545, "license_type": "no_license", "max_line_length": 208, "num_lines": 9, "path": "/final_code_deliverables/README.txt", "repo_name": "Ashard/FIT3162-Computer-Science-Project-2", 
"src_encoding": "UTF-8", "text": "1) Open the file named \"main.py\"\n2) In the line of code that shows \"main(\"ENTER FILENAME HERE\")\", enter the filename of the dataset that contains reviews. We have already prepared 5 files with 1000 reviews each. The filenames are as follows:\n\t1) Amazon_Fire_TV.txt\n\t2) Amazon_Kindle.txt\n\t3) All_New_Fire_HD.txt\n\t4) Fire_Tablet\n\t5) Echo_White.txt\n\n3) After entering the filename, simply run the file and wait. After approximately a minute (varies depending on the machine), a screen will pop up on the screen which shows the summarized opinions. " }, { "alpha_fraction": 0.7424242496490479, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 32, "blob_id": "351ba320cba4d9c1cd35bd9cf7074ee5f2bea527", "content_id": "d4a938642aca4a9aadb62b3c5968a5239dd27b06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 66, "license_type": "no_license", "max_line_length": 37, "num_lines": 2, "path": "/README.md", "repo_name": "Ashard/FIT3162-Computer-Science-Project-2", "src_encoding": "UTF-8", "text": "# FIT3162-Computer-Science-Project-2-\nAspect based opinion mining\n" }, { "alpha_fraction": 0.6537086963653564, "alphanum_fraction": 0.6697944402694702, "avg_line_length": 38.98214340209961, "blob_id": "821f90182459e61049b31d06a39bda8db162933b", "content_id": "22bbe55cc618c302539b3701987eb89b39b647be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2238, "license_type": "no_license", "max_line_length": 164, "num_lines": 56, "path": "/final_code_deliverables/plot_graph.py", "repo_name": "Ashard/FIT3162-Computer-Science-Project-2", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\n\n# parameter: summarized_opinions: a dictionary that contains (aspect: sentiment score) pair\n# return: the top ten most reviewed aspects along with the number of positive and negative reviews. eg: [(aspect, num of positive reviews, num of negative reviews)]\ndef top_ten_most_reviewed_aspects(summarized_opinion):\n # a list that contains the aspect and the total number of positive and negative reviews. 
eg: [(aspect, 2000)]\n aspects_with_total_num_reviews = []\n\n for item in summarized_opinion:\n polarity_list = summarized_opinion[item]\n if (polarity_list[0] + polarity_list[1]) > 30:\n aspects_with_total_num_reviews.append(\n (polarity_list[0] + polarity_list[1], item, polarity_list[0], polarity_list[1]))\n\n aspects_with_total_num_reviews.sort()\n aspects_with_total_num_reviews = aspects_with_total_num_reviews[::-1]\n\n top_ten = []\n\n i = 0\n while i < 10 and i < len(aspects_with_total_num_reviews):\n top_ten.append((aspects_with_total_num_reviews[i][1], aspects_with_total_num_reviews[i][2],\n aspects_with_total_num_reviews[i][3]))\n i += 1\n\n return top_ten\n\n# parameter: summarized_opinions: a dictionary that contains (aspect: sentiment score) pair\n# return: plots a graph which shows number of positive and negative reviews for each of the aspects.\ndef plot_graph(summarized_opinion, label):\n negative_data = []\n positive_data = []\n aspect_list = []\n\n top_ten_aspects = top_ten_most_reviewed_aspects(summarized_opinion)\n\n for item in top_ten_aspects:\n aspect_list.append(item[0])\n positive_data.append(item[1])\n negative_data.append(-(item[2]))\n\n x = range(len(aspect_list))\n\n plt.rcParams.update({'font.size': 25})\n fig = plt.figure(figsize=(12, 12))\n ax = plt.subplot(111)\n\n plt.xticks(rotation='vertical')\n plt.xlabel(\"Aspects\")\n plt.ylabel(\"Number of reviews\")\n ax.bar(x, negative_data, color='r', tick_label=aspect_list, align='center', width=0.3, label=\"negative\")\n ax.bar(x, positive_data, color='b', tick_label=aspect_list, align='center', width=0.3, label=\"positive\")\n plt.legend()\n plt.title(label=label)\n\n plt.show()" }, { "alpha_fraction": 0.6787974834442139, "alphanum_fraction": 0.6835442781448364, "avg_line_length": 34.16666793823242, "blob_id": "2886bf5d37cddd1c79a203485943dda53a919aa2", "content_id": "b54dcee118451cf0faebe769c036979de355994d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 632, "license_type": "no_license", "max_line_length": 122, "num_lines": 18, "path": "/final_code_deliverables/senticnet_dictionary.py", "repo_name": "Ashard/FIT3162-Computer-Science-Project-2", "src_encoding": "UTF-8", "text": "# senticnet dictionary\n# parameter: filename: name of the file which contains the opinion word-sentiment pairs of the senticNet 5 knowledge base.\n# return: returns a dictionary where a word-sentiment corresponds to \"key-value\" pair.\ndef senticnet_dict(filename):\n f = open(filename, \"r\")\n\n senticNet = {} # dictionary of opinion words as key-value pair, eg: (happy, positive)\n\n f.readline() # ignore the first line as this contains the heading\n\n for line in f:\n line = line.strip(\"\\t\")\n line = line.split()\n senticNet[line[0]] = line[1]\n\n f.close() # close sentiment file\n\n return senticNet" }, { "alpha_fraction": 0.7575757503509521, "alphanum_fraction": 0.7979797720909119, "avg_line_length": 31.66666603088379, "blob_id": "30490c739d4affb404798441994e563e747e6e95", "content_id": "f1da0f0900f0b261c5198e70e43e269234fc109f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 99, "license_type": "no_license", "max_line_length": 52, "num_lines": 3, "path": "/final_code_deliverables/stanfordCoreNlp.py", "repo_name": "Ashard/FIT3162-Computer-Science-Project-2", "src_encoding": "UTF-8", "text": "from stanfordcorenlp import StanfordCoreNLP\n\nnlp = StanfordCoreNLP('http://localhost', port=9000)\n\n" }, { 
"alpha_fraction": 0.5537189841270447, "alphanum_fraction": 0.5640496015548706, "avg_line_length": 22.61788558959961, "blob_id": "0a6a911a72d3c4a3f89af718b3acdadc88df4b32", "content_id": "a1dc36e4aa55967378beea25c238102aa956cd9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2904, "license_type": "no_license", "max_line_length": 60, "num_lines": 123, "path": "/final_code_deliverables/implicit_aspect_dictionary.py", "repo_name": "Ashard/FIT3162-Computer-Science-Project-2", "src_encoding": "UTF-8", "text": "import re\nfrom collections import defaultdict\nimport numpy as np\nfrom nltk.corpus import wordnet\nfrom senticnet.senticnet import SenticNet\n\n\ndef read(filename):\n # extract lines based starting letter is an alphabet\n file = open(filename, \"rt\")\n data = []\n for line in file:\n if re.match(\"^[a-zA-Z]+.*\", line):\n line = line.strip(\"\\n\")\n line = line.split(\"##\")\n temp = line[0]\n temp = temp.split(\",\")\n # aspects.append(temp)\n data.append(line[0])\n return data\n\n\ndef clean(data):\n data2 = []\n for i in data:\n temp = str(i)\n temp2 = temp.replace(\"[\", \",\")\n temp3 = temp2.replace(\"]\", \"\")\n data2.append(temp3)\n return data2\n\n\ndef clean2(data):\n # reshaping words in each index\n arr = np.array(data)\n arr = np.hstack(arr)\n arr = arr.reshape(len(arr) // 2, 2)\n final_list = arr.tolist()\n return final_list\n\n\ndef dictionary(data):\n # taking only unique values\n data6 = set(tuple(x) for x in data)\n data7 = [list(x) for x in data6]\n data8 = [(t[1], t[0]) for t in data7]\n\n d = defaultdict(list)\n for k, v in data8:\n d[v].append(k)\n\n temp = (dict(d))\n implicit_dict = {}\n for aspect in temp:\n implicit_dict[aspect] = temp[aspect]\n return (implicit_dict)\n\n\ndef syn_ant(lst):\n # obtaining synonym and antonym words\n synonyms = []\n antonyms = []\n\n for syn in wordnet.synsets(lst):\n for l in syn.lemmas():\n synonyms.append(l.name())\n if l.antonyms():\n antonyms.append(l.antonyms()[0].name())\n antonyms.extend(synonyms)\n return list(set(antonyms))\n\n\ndef more_words(arr):\n # for each aspect it will be appended to a category\n ans = []\n for item in arr:\n synonyms = syn_ant(item[0])\n for i in range(len(synonyms)):\n ans.append([synonyms[i], item[1]])\n return ans\n\n\ndef Senticnet(data):\n # obtaining semantic of each word\n sn = SenticNet()\n se = []\n error = []\n for item in data:\n try:\n semantics = sn.semantics(item[0])\n for i in range(len(semantics)):\n se.append([semantics[i], item[1]])\n except KeyError as e:\n error = e\n se.extend(data)\n return se\n\n\ndef Output(data):\n data = clean(data)\n data = [s.split(\",\") for s in data]\n data = [[s.strip(' ') for s in words] for words in data]\n for i in data:\n if len(i) == 1:\n data.remove(i)\n\n data = [num for elem in data for num in elem]\n data = [data[i:i + 2] for i in range(0, len(data), 2)]\n\n new_data = more_words(data)\n data.extend(new_data)\n data = Senticnet(data)\n data = clean2(data)\n data = dictionary(data)\n return data\n\n\ndef implicit_aspect_dict(filename):\n data = read(filename)\n # print(data)\n data = Output(data)\n\n return data" }, { "alpha_fraction": 0.7296587824821472, "alphanum_fraction": 0.7296587824821472, "avg_line_length": 24.399999618530273, "blob_id": "0a4b70f593e5b6e7b26b417169ca61ea2e5b9343", "content_id": "3cf318627952548e0bf00b31a27d5f1ae3d315dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": 
"no_license", "max_line_length": 51, "num_lines": 15, "path": "/final_code_deliverables/main.py", "repo_name": "Ashard/FIT3162-Computer-Science-Project-2", "src_encoding": "UTF-8", "text": "from aspect_extractor import aspect_extractor\nfrom plot_graph import plot_graph\n\ndef main(filename):\n summarized_opinion = aspect_extractor(filename)\n\n #get the label to label the chart\n label = filename\n label = label.replace(\".txt\", \"\")\n\n #plot the summarized opinion\n plot_graph(summarized_opinion, label)\n\n#enter the filename here\nmain(\"ENTER FILENAME HERE\")\n" }, { "alpha_fraction": 0.5265676379203796, "alphanum_fraction": 0.5348992943763733, "avg_line_length": 44.69877624511719, "blob_id": "8f44291fbec2bd9a7f2ae84c1187607ee8b9e55e", "content_id": "1df422d4a64a7fd97e5e7300d43e428a99112097", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29886, "license_type": "no_license", "max_line_length": 137, "num_lines": 654, "path": "/final_code_deliverables/aspect_extractor.py", "repo_name": "Ashard/FIT3162-Computer-Science-Project-2", "src_encoding": "UTF-8", "text": "import json\nimport string\nfrom stanfordCoreNlp import nlp\nfrom implicit_aspect_dictionary import implicit_aspect_dict\nfrom senticnet_dictionary import senticnet_dict\nfrom nltk.corpus import stopwords\nfrom explicit_aspect_extractor import rule_noun_phrases\nfrom explicit_aspect_extractor import remove_unnessecary_elements\nfrom explicit_aspect_extractor import pos\n\n# parameter: filename: the filename of the file which contains the annotated aspects and sentences\n# return: the summarized opinions for each extracted aspect as a dictionary.\ndef aspect_extractor(filename):\n # parameter: token_index: the index of the token that was extracted\n # function: extracts the additional aspect (t) as per the rule\n def additional_rule_1(token_index):\n for dep_obj in sent_obj['basicDependencies']:\n if dep_obj['dep'] == 'conj' and dep_obj['governor'] == token_index:\n t_index = dep_obj['dependent']\n t_token_obj = sent_obj['tokens'][t_index - 1]\n\n if t_token_obj['word'].lower() not in stops and aspects.get(t_token_obj['lemma'].lower()) is None:\n opinion_words = opinion_word_extractor(sent_obj, t_token_obj['index'])\n agg_sentiment_score = sentiment_score(opinion_words)\n\n aspects[t_token_obj['lemma'].lower()] = agg_sentiment_score\n\n additional_rule_2(t_token_obj['index'])\n\n # parameter: token_index: the index of the token that was extracted\n # function: extracts the additional aspect (t) as per the rule\n def additional_rule_2(token_index):\n for dep_obj in sent_obj['basicDependencies']:\n if dep_obj['dep'] == 'compound' and dep_obj['governor'] == token_index:\n t_token = sent_obj['tokens'][token_index - 1]\n h_index = dep_obj['dependent']\n h_token = sent_obj['tokens'][h_index - 1]\n\n if pos_noun(h_token) is True:\n\n if t_token['lemma'].lower() in aspects:\n aspects.pop(t_token['lemma'].lower())\n\n if h_token['word'].lower() not in stops and t_token['word'].lower() not in stops and aspects.get(\n h_token['lemma'].lower() + \"_\" + t_token['lemma'].lower()) is None:\n opinion_words = opinion_word_extractor(sent_obj, h_token['index'])\n agg_sentiment_score = sentiment_score(opinion_words)\n\n opinion_words = opinion_word_extractor(sent_obj, t_token['index'])\n agg_sentiment_score += sentiment_score(opinion_words)\n\n aspects[h_token['lemma'].lower() + \"_\" + t_token['lemma'].lower()] = agg_sentiment_score\n\n # parameter: dep_obj: check if the dependencies in 
dep_list are connected to dep_obj\n # return: True if dep_obj has one of the dependencies, False otherwise.\n def opinion_word_dependency(dep_obj):\n dep_list = ['amod', 'nmod', 'advmod', 'nsubj','conj']\n for dep in dep_list:\n if dep_obj['dep'] == dep:\n return True\n return False\n\n # parameter: token_index: the index of the token that was extracted\n # return: the opinion word set for the extracted aspect\n def opinion_word_extractor(sent_obj, token_index):\n opinion_words = set()\n\n for dep_obj in sent_obj['basicDependencies']:\n if dep_obj['governor'] == token_index and opinion_word_dependency(dep_obj) is True:\n opinion_word_index = dep_obj['dependent']\n opinion_word_token = sent_obj['tokens'][opinion_word_index - 1]\n\n if pos_adj(opinion_word_token) is True or pos_noun(opinion_word_token) is True or pos_verb(\n opinion_word_token) is True:\n opinion_words.add(opinion_word_token['word'].lower())\n\n if dep_obj['dependent'] == token_index and opinion_word_dependency(dep_obj) is True:\n opinion_word_index = dep_obj['governor']\n opinion_word_token = sent_obj['tokens'][opinion_word_index - 1]\n if pos_adj(opinion_word_token) is True or pos_noun(opinion_word_token) is True or pos_verb(\n opinion_word_token) is True:\n opinion_words.add(opinion_word_token['word'].lower())\n\n return opinion_words\n\n # parameter: opinion_words: a set containing the opinion words\n # return: the aggregate sentiment score of all the words in the set\n def sentiment_score(opinion_words):\n if len(opinion_words) == 0:\n return 0\n\n score = 0\n for word in opinion_words:\n if senticNet.get(word) is not None:\n\n if senticNet.get(word) == 'positive':\n score += 1\n else:\n score -= 1\n return score\n\n # parameter: aspect_term: the implicit aspect term which needs to be categorized\n # return: the implicit aspect category, False if no category exists\n def implicit_aspect_categorizer(aspect_term):\n if implicit_lexicon.get(aspect_term) is not None:\n return implicit_lexicon.get(aspect_term)\n return False\n\n # parameter: aspect_term: the aspect which needs to extracted\n def add_to_aspects(token_index):\n if isinstance(token_index, tuple):\n token_obj = sent_obj['tokens']\n token_1_index = token_index[0]\n token_2_index = token_index[1]\n\n word_1 = token_obj[token_1_index - 1]\n word_2 = token_obj[token_2_index - 1]\n\n if word_1['word'].lower() not in stops and word_2['word'].lower() not in stops and aspects.get(\n word_1['lemma'].lower() + \"_\" + word_2['lemma'].lower()) is None:\n opinion_words = opinion_word_extractor(sent_obj, token_1_index)\n agg_sentiment_score = sentiment_score(opinion_words)\n\n opinion_words = opinion_word_extractor(sent_obj, token_2_index)\n agg_sentiment_score += sentiment_score(opinion_words)\n\n aspects[word_1['lemma'].lower() + \"_\" + word_2['lemma'].lower()] = agg_sentiment_score\n\n # extract the additional aspects as per rule 1\n additional_rule_1(token_1_index)\n additional_rule_1(token_2_index)\n\n # extract the additional aspects as per rule 2\n additional_rule_2(token_1_index)\n additional_rule_2(token_2_index)\n\n else:\n token_obj = sent_obj['tokens']\n\n for obj in token_obj:\n if obj['index'] == token_index:\n if obj['word'].lower() not in stops and aspects.get(obj['lemma'].lower()) is None:\n opinion_words = opinion_word_extractor(sent_obj, obj['index'])\n agg_sentiment_score = sentiment_score(opinion_words)\n\n aspects[obj['lemma'].lower()] = agg_sentiment_score\n\n additional_rule_1(token_index)\n additional_rule_2(token_index)\n\n # parameter: 
aspect_dict: the dictionary which contains the (aspect:sentiment_score) pair\n # return: no return value, simply updates global variable summarized_sentiment_score\n def sentiment_summarizer(aspect_dict):\n for item in aspect_dict:\n aspect_category = implicit_aspect_categorizer(item)\n\n # if the aspect does not have a category\n if aspect_category is False:\n # if the aspect does not exist in the dictionary\n if summarized_sentiment_score.get(item) is None:\n # if the aspect has a positive score\n if aspect_dict[item] > 0:\n summarized_sentiment_score[item] = [1, 0, 0] # [positive, negative, neutral]\n\n # if the aspect has a negative score\n elif aspect_dict[item] < 0:\n summarized_sentiment_score[item] = [0, 1, 0] # [positive, negative, neutral]\n\n # if the aspect has a neutral score\n else:\n summarized_sentiment_score[item] = [0, 0, 1] # [positive, negative, neutral]\n # if the aspect already exists\n else:\n\n # if the aspect has a positive score\n if aspect_dict[item] > 0:\n # update the positive score\n summarized_sentiment_score[item][0] += 1\n\n # if the aspect has a negative score\n elif aspect_dict[item] < 0:\n # update the positive score\n summarized_sentiment_score[item][1] += 1\n\n # if the aspect has a neutral score\n else:\n summarized_sentiment_score[item][2] += 1\n\n\n # if the aspect has a category\n else:\n # if the aspect category does not exist\n if summarized_sentiment_score.get(aspect_category[0]) is None:\n # if the score is positive for the aspect category\n if aspect_dict[item] > 0:\n summarized_sentiment_score[aspect_category[0]] = [1, 0, 0] # [positive, negative, neutral]\n\n # if the score is negative\n elif aspect_dict[item] < 0:\n summarized_sentiment_score[aspect_category[0]] = [0, 1, 0] # [positive, negative, neutral]\n\n # the score is neutral\n else:\n summarized_sentiment_score[aspect_category[0]] = [0, 0, 1]\n\n # if the aspect category does exist\n else:\n # if the score is positive for the aspect category\n if aspect_dict[item] > 0:\n summarized_sentiment_score[aspect_category[0]][0] += 1 # [positive, negative, neutral]\n\n # if the score is negative\n elif aspect_dict[item] < 0:\n summarized_sentiment_score[aspect_category[0]][1] += 1 # [positive, negative, neutral]\n\n # the score is neutral\n else:\n summarized_sentiment_score[aspect_category[0]][2] += 1\n\n # check if the token is a Noun\n # parameter: token_obj: the token which needs to be checked\n # return: True if token is a Noun, False otherwise.\n def pos_noun(token_obj):\n if (token_obj['pos'] == 'NN' or token_obj['pos'] == 'NNS' or token_obj['pos'] == 'NNP' or token_obj[\n 'pos'] == 'NNPS'):\n return True\n return False\n\n # check if the token is a Verb\n # parameter: token_obj: the token which needs to be checked\n # return: True if token is a Verb, False otherwise.\n def pos_verb(token_obj):\n if (token_obj['pos'] == 'VB' or token_obj['pos'] == 'VBD' or token_obj['pos'] == 'VBG' or token_obj[\n 'pos'] == 'VBN' or token_obj['pos'] == 'VBP' or token_obj['pos'] == 'VBZ'):\n return True\n return False\n\n # check if the token is a Adjective\n # parameter: token_obj: the token which needs to be checked\n # return: True if token is a Adjective, False otherwise.\n def pos_adj(token_obj):\n if (token_obj['pos'] == 'JJ' or token_obj['pos'] == 'JJR' or token_obj['pos'] == 'JJS'):\n return True\n return False\n\n # check if the token is a Adverb\n\n # parameter: token_obj: the token which needs to be checked\n # return: True if token is a Adverb, False otherwise.\n def pos_adverb(token_obj):\n 
if (token_obj['pos'] == 'RB' or token_obj['pos'] == 'RBR' or token_obj['pos'] == 'RBS' or token_obj[\n 'pos'] == 'WRB'):\n return True\n return False\n\n # parameter: token_index: the index of the token (t) in the sentence\n\n # return: True if rule number 1 conditions are satisfied, False otherwise\n def subject_noun_rule_1(token_index):\n for dep_obj in sent_obj['basicDependencies']:\n if (dep_obj['dep'] == 'amod' or dep_obj['dep'] == 'advmod') and dep_obj['governor'] == token_index:\n if (senticNet.get(dep_obj['dependentGloss'])) is not None:\n return True\n return False\n\n # parameter: sent_obj: the current sentence object (dictionary) being analyzed\n # return: True if the sentence does contain auxiliary verb, False otherwise\n def has_aux(sent_obj):\n for token_obj in sent_obj['tokens']:\n if token_obj['pos'] == 'MD': # if the sentence contains auxiliary verb\n return True\n return False\n\n # parameter: token_index: the index of the token (t) in the sentence\n # return: True if the conditions for rule 2a are satisfied, False otherwise.\n def subject_noun_rule_2a(token_index):\n for dep_obj in sent_obj['basicDependencies']:\n if (dep_obj['dep'] == 'amod' or dep_obj['dep'] == 'advmod' or dep_obj['dep'] == 'advcl') and dep_obj[\n 'governor'] == token_index:\n return True\n return False\n\n # parameter: token_index: the index of the token (t) in the sentence\n # return: token index returned if the conditions for rule 2b are satisfied, False otherwise.\n def subject_noun_rule_2b(token_index):\n for dep_obj in sent_obj['basicDependencies']:\n if dep_obj['dep'] == 'dobj' and dep_obj['governor'] == token_index:\n n_index = dep_obj['dependent']\n token_obj = sent_obj['tokens'][n_index - 1]\n\n if (pos_noun(token_obj) is True) and senticNet.get(token_obj['word'].lower()) is None:\n return (token_obj['index'])\n return False\n\n # parameter: token_index: the index of the token (t) in the sentence\n # return: token index returned if the conditions for rule 2c are satisfied, False otherwise.\n def subject_noun_rule_2c(token_index):\n for dep_obj in sent_obj['basicDependencies']:\n if dep_obj['dep'] == 'dobj' and dep_obj['governor'] == token_index:\n n_index = dep_obj['dependent']\n token_obj = sent_obj['tokens'][n_index - 1]\n\n n_exists = False\n if (pos_noun(token_obj) is True) and senticNet.get(token_obj['word'].lower()) is not None:\n n_exists = True\n n = token_obj['index']\n\n # find n1\n n1 = subject_noun_rule_2c_helper(n_index)\n\n if (n_exists is True) and (n1 is not False):\n return n, n1\n\n if (n_exists is False) and (n1 is not False):\n return n1\n\n if (n_exists is True) and (n1 is False):\n return n\n\n return False\n\n # parameter: token_index: the index of the token (n) in the sentence\n # return: token index returned if the conditions for rule 2c are satisfied, False otherwise.\n def subject_noun_rule_2c_helper(token_index):\n for dep_obj in sent_obj['basicDependencies']:\n if dep_obj['governor'] == token_index:\n n1_index = dep_obj['dependent']\n token_obj = sent_obj['tokens'][n1_index - 1]\n\n if pos_noun(token_obj) is True:\n return token_obj['index']\n return False\n\n # parameter: token_index: the index of the token (t) in the sentence\n # return: token index returned if the conditions for rule 2d are satisfied, False otherwise.\n def subject_noun_rule_2d(token_index):\n for dep_obj in sent_obj['basicDependencies']:\n if dep_obj['dep'] == 'xcomp' and dep_obj['governor'] == token_index:\n t_token = sent_obj['tokens'][token_index - 1]\n\n t1_index = dep_obj['dependent']\n\n 
t1_token = sent_obj['tokens'][t1_index - 1]\n\n t_t1 = False\n if senticNet.get(t_token['word'].lower() + \"_\" + t1_token['word'].lower()) is not None:\n t_t1 = True\n\n t2 = subject_noun_rule_2d_helper(t1_index)\n\n if (t2 is not False) and t_t1 is True:\n return t_token['index'], t1_token['index'], t2\n\n if (t2 is not False) and t_t1 is False:\n return t2\n\n if (t2 is False) and t_t1 is True:\n return t_token['index'], t1_token['index']\n return False\n\n # parameter: token_index: the index of the token (t1) in the sentence\n # return: token index returned if the conditions for rule 2d are satisfied, False otherwise.\n def subject_noun_rule_2d_helper(token_index):\n for dep_obj in sent_obj['basicDependencies']:\n if dep_obj['governor'] == token_index:\n t2_index = dep_obj['dependent']\n token_obj = sent_obj['tokens'][t2_index - 1]\n\n if pos_noun(token_obj) is True:\n return token_obj['index']\n return False\n\n # parameter: token_index: the index of the token (t) in the sentence\n # return: token index of the token to be extracted returned if the conditions for rule 3 are satisfied, False otherwise.\n def subject_noun_rule_3(token_index):\n for dep_obj in sent_obj['basicDependencies']:\n if (dep_obj['dep'] == 'cop' or dep_obj['dep'] == 'xcomp') and dep_obj['governor'] == token_index:\n copular_verb_index = dep_obj['dependent']\n copular_token = sent_obj['tokens'][copular_verb_index - 1]\n t_token = sent_obj['tokens'][token_index - 1]\n\n if implicit_lexicon.get(t_token['word'].lower()) is not None:\n return t_token['index']\n return False\n\n # parameter: token_index: the index of the token (t) in the sentence\n # return: True if the token (t) is in a copular relation, False otherwise\n def subject_noun_rule_4(token_index):\n for dep_obj in sent_obj['basicDependencies']:\n if (dep_obj['dep'] == 'cop' or dep_obj['dep'] == 'xcomp') and dep_obj['governor'] == token_index:\n return True\n return False\n\n # parameter: token_index: the index of the token (t) in the sentence\n # return: the indexes of the tokens that need to be extracted as per the rules, False if none exist\n def subject_noun_rule_5(token_index):\n for dep_obj in sent_obj['basicDependencies']:\n if (dep_obj['dep'] == 'cop' or dep_obj['dep'] == 'xcomp') and dep_obj['governor'] == token_index:\n t_token_obj = sent_obj['tokens'][token_index - 1]\n t1 = subject_noun_rule_5_helper(token_index)\n\n if t1 is not False:\n if implicit_lexicon.get(t_token_obj['word'].lower()) is not None:\n return t_token_obj['index'], t1\n else:\n return t1\n else:\n if implicit_lexicon.get(t_token_obj['word'].lower()) is not None:\n return t_token_obj['index']\n return False\n\n # parameter: token_index: the index of the token (copular_verb) in the sentence\n # return: token index if the conditions are met, False otherwise\n def subject_noun_rule_5_helper(token_index):\n for dep_obj in sent_obj['basicDependencies']:\n if dep_obj['governor'] == token_index:\n t1_index = dep_obj['dependent']\n t1_token_obj = sent_obj['tokens'][t1_index - 1]\n if pos_verb(t1_token_obj) is True and implicit_lexicon.get(t1_token_obj['word'].lower()) is not None:\n return t1_token_obj['index']\n return False\n\n # parameter: sent_obj: the sentence object currently being passed through(the sentence without subject noun)\n # return: token index of the word to be extracted as per rule, False otherwise\n def non_subject_noun_rule_1(sent_obj):\n for token_obj in sent_obj['tokens']:\n if (pos_adverb(token_obj) is True) or (pos_adj(token_obj) is True) or pos_verb(token_obj) 
is True:\n token_index = token_obj['index']\n t = non_subject_noun_rule_1_helper(token_index)\n if t is not False:\n if implicit_lexicon.get(token_obj['word']) is not None:\n return token_obj['index']\n return False\n\n # parameter: token_index: the index of the current token in the sentence\n # return: True if the conditions for (t) are met, False otherwise\n def non_subject_noun_rule_1_helper(token_index):\n for dep_obj in sent_obj['basicDependencies']:\n if dep_obj['dep'] == 'mark' or dep_obj['dep'] == 'xcomp':\n return True\n return False\n\n # parameter: sent_obj: the sentence object currently being parsed through\n # return: token indexes as per the rule, False if none exist\n def non_subject_noun_rule_2(sent_obj):\n for dep_obj in sent_obj['basicDependencies']:\n if dep_obj['dep'] == 'nmod' or dep_obj['dep'] == 'case' or dep_obj['dep'] == 'mark':\n h_index = dep_obj['governor']\n t_index = dep_obj['dependent']\n\n h_token_obj = sent_obj['tokens'][h_index - 1]\n t_token_obj = sent_obj['tokens'][t_index - 1]\n\n return h_token_obj['index'], t_token_obj['index']\n return False\n\n # parameter: sent_obj: the sentence object currently being parsed through\n # return: token indexes as per the rule, False if none exist\n def non_subject_noun_rule_3(sent_obj):\n for dep_obj in sent_obj['basicDependencies']:\n if dep_obj['dep'] == 'dobj':\n t_index = dep_obj['dependent']\n t_token_obj = sent_obj['tokens'][t_index - 1]\n return t_token_obj['index']\n return False\n\n # parameter: word: the input word which needs to be lemmatized\n # return: lemmatized word\n def lemmatize_word(word):\n word = word.replace('\"', '')\n word = word.replace(\"'\", \"\")\n\n word = nlp.annotate(word, properties={'annotators': 'lemma', 'outputFormat': 'json'})\n\n # convert the json string to a dictionary\n if type(word) is str or type(word) is unicode:\n word = json.loads(word, strict=False)\n\n # lemmatize the cleaned aspect\n word = word['sentences'][0]\n word_token = word['tokens']\n\n # if there are more than one word in the aspect\n word = ''\n if len(word_token) > 0:\n for token_obj in word_token:\n word += token_obj['lemma'] + \" \"\n\n word = word.lower()\n word = word.strip()\n word = word.replace(\" \", \"_\")\n word = word.replace(\"-\", \"_\")\n\n return word\n\n # parameter: word: the word in the sentence of which the index is needed.\n # return: index of the word in the sentence, and sentence object if it exists, False otherwise.\n def find_index(word):\n for sent_obj in sentences:\n tokens_obj = sent_obj['tokens']\n for token in tokens_obj:\n if token['word'] == word:\n return sent_obj, token['index']\n return False\n\n summarized_sentiment_score = {}\n\n # senticnet dictionary\n senticNet = senticnet_dict(\"senticnet5.txt\")\n\n # implicit aspect lexicon\n implicit_lexicon = implicit_aspect_dict(\"implicit_aspects_dict.txt\")\n\n # stopword set\n stops = set(stopwords.words('english'))\n\n #open the file that contains the review text\n #aspect_sent_tuple = line_to_tuple(filename)\n file_obj = open(filename, 'r')\n\n # extract the aspects for each opinion\n for line in file_obj:\n\n # the dictionary which keeps track of all the aspects that have been extracted from the sentence\n aspects = {}\n\n output = nlp.annotate(line,\n properties={'annotators': 'tokenize,ssplit,pos,depparse,lemma', 'outputFormat': 'json'})\n\n # convert the json string to a dictionary\n if type(output) is str or type(output) is unicode:\n output = json.loads(output, strict=False)\n\n sentences = output['sentences']\n\n # 
explicit aspect extraction\n words = pos(line)\n rule_phrases = rule_noun_phrases(words)\n rule_phrases = [word for word in rule_phrases if\n word not in stopwords.words('english') and word not in string.punctuation]\n rule_phrases = remove_unnessecary_elements(rule_phrases)\n\n # finding the corresponding opinion words\n for aspect_term in rule_phrases:\n aspect_term_index = find_index(aspect_term)\n\n if aspect_term_index is not False:\n opinion_words = opinion_word_extractor(aspect_term_index[0], aspect_term_index[1])\n agg_sentiment_score = sentiment_score(opinion_words)\n\n else:\n agg_sentiment_score = 0\n\n aspect_term = lemmatize_word(aspect_term)\n aspect_term = aspect_term.strip()\n aspect_term = aspect_term.lower()\n\n # aspect_term = aspect_term.replace(\" \", \"_\")\n aspects[aspect_term] = agg_sentiment_score\n\n # implicit aspect extraction\n for sent_obj in sentences:\n has_subject_noun = False # flag to check if the sentence contains subject noun or not\n for dep_obj in sent_obj['basicDependencies']:\n\n # trigger for subject noun rule\n if dep_obj[\n 'dep'] == 'nsubj': # or dep_obj['dep'] == 'csubj' or dep_obj['dep'] == 'csubjpass' or dep_obj['dep'] == 'nsubjpass':\n\n has_subject_noun = True\n\n h = dep_obj['dependent'] # active token\n t = dep_obj['governor']\n\n rule_1 = subject_noun_rule_1(t)\n if rule_1 is not False:\n add_to_aspects(t)\n\n # #rule number 2\n # if has_aux(sent_obj) is False: # if the sentence does not contain auxiliary verb\n # rule 2a\n # rule_2a = subject_noun_rule_2a(t)\n # if rule_2a is not False:\n # add_to_aspects(t)\n # add_to_aspects(h)\n\n # rule 2b\n # rule_2b = subject_noun_rule_2b(t)\n # if rule_2b is not False:\n # add_to_aspects(rule_2b)\n\n # rule_2c = subject_noun_rule_2c(t)\n # if rule_2c is not False:\n # if isinstance(rule_2c, tuple):\n # add_to_aspects(rule_2c[0])\n # add_to_aspects(rule_2c[1])\n # else:\n # add_to_aspects(rule_2c)\n\n # rule_2d = subject_noun_rule_2d(t)\n # if rule_2d is not False:\n # if isinstance(rule_2d, tuple):\n # #t,t2, and t2\n # if len(rule_2d) == 3:\n # add_to_aspects((rule_2d[0],rule_2d[1]))\n # add_to_aspects(rule_2d[2])\n\n # if len(rule_2d) == 2:\n # add_to_aspects((rule_2d[0], rule_2d[1]))\n\n # else:\n # add_to_aspects(rule_2d)\n\n rule_3 = subject_noun_rule_3(t)\n if rule_3 is not False:\n add_to_aspects(rule_3)\n\n # rule_4 = subject_noun_rule_4(t)\n # if rule_4 is not False:\n # h_token = sent_obj['tokens'][h - 1]\n # if pos_noun(h_token) is True:\n # add_to_aspects(h)\n\n rule_5 = subject_noun_rule_5(t)\n if rule_5 is not False:\n if isinstance(rule_5, tuple):\n add_to_aspects(rule_5[0])\n add_to_aspects(rule_5[1])\n else:\n add_to_aspects(rule_5)\n\n # # noun subject noun rule trigger\n if has_subject_noun is False:\n\n rule_6 = non_subject_noun_rule_1(sent_obj)\n if rule_6 is not False:\n add_to_aspects(rule_6)\n\n # rule_7 = non_subject_noun_rule_2(sent_obj)\n # if rule_7 is not False:\n # add_to_aspects(rule_7[0])\n # add_to_aspects(rule_7[1])\n\n # rule_8 = non_subject_noun_rule_3(sent_obj)\n # if rule_8 is not False:\n # add_to_aspects(rule_8)\n\n # summarize the sentiment score\n sentiment_summarizer(aspects)\n\n nlp.close()\n file_obj.close()\n\n return summarized_sentiment_score" } ]
10
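Aside: the record above ends with aspect_extractor.py, whose sentiment_summarizer keeps a [positive, negative, neutral] review count per extracted aspect. Below is a minimal standalone sketch of that tally; the function name summarize and the battery/screen sample scores are hypothetical stand-ins for the pipeline's real per-review output, not code from the repository.

def summarize(per_review_scores):
    summary = {}  # aspect -> [positive, negative, neutral] review counts
    for aspects in per_review_scores:  # one dict of aggregate scores per review
        for aspect, score in aspects.items():
            counts = summary.setdefault(aspect, [0, 0, 0])
            if score > 0:
                counts[0] += 1  # positive review for this aspect
            elif score < 0:
                counts[1] += 1  # negative review
            else:
                counts[2] += 1  # neutral review
    return summary

# hypothetical sample input; prints {'battery': [1, 1, 0], 'screen': [0, 0, 1]}
print(summarize([{"battery": 2, "screen": 0}, {"battery": -1}]))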
jixin456/mini-NoteDemo
https://github.com/jixin456/mini-NoteDemo
06c29444d2748afbbb659b0960382788da924ec4
cbad88609bbda8d3929684bef06d5b43421618b0
955e43024e88dfbd9db0fa43d4badbb0f72c5bad
refs/heads/master
2020-04-26T03:12:16.224263
2019-03-01T08:56:19
2019-03-01T08:56:19
173,259,492
6
1
null
2019-03-01T07:55:57
2019-03-12T09:45:14
2019-03-05T12:40:21
Python
[ { "alpha_fraction": 0.7916666865348816, "alphanum_fraction": 0.7916666865348816, "avg_line_length": 48, "blob_id": "cbac537ded4db808b3275f064f308020a2181835", "content_id": "9de28a6458d7d0d5ebe1cb4e3f1fa71b98f6d1bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 48, "num_lines": 1, "path": "/mini-NoteDemo/djagno后台/mynote/apps/note/__init__.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "default_app_config = \"apps.note.apps.NoteConfig\"" }, { "alpha_fraction": 0.59375, "alphanum_fraction": 0.6510416865348816, "avg_line_length": 18.200000762939453, "blob_id": "af9b2e4d3e0730235db49ab1d7fa1522acf195ce", "content_id": "624be99fe34fe812d696acd794f4f2ad53e0cd26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "no_license", "max_line_length": 37, "num_lines": 10, "path": "/mini-NoteDemo/djagno后台/mynote/apps/users/tests.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "from django.test import TestCase\n\n# Create your tests here.\n\n# import requests\n#\n# url = 'http://127.0.0.1:8000/food/'\n# data = {'data': 2}\n# h = requests.post(url, json=data)\n# print(h.text)\n" }, { "alpha_fraction": 0.761904776096344, "alphanum_fraction": 0.761904776096344, "avg_line_length": 35.875, "blob_id": "7c02ebf48b9e87e9146d3bb67e20be2476799a0f", "content_id": "6b172bf363216555c8e3d1135af5490f8ac7929b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 342, "license_type": "no_license", "max_line_length": 61, "num_lines": 8, "path": "/mini-NoteDemo/djagno后台/mynote/apps/users/Serializers.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom apps.users.models import UserProFile\n#Serializer 的主要工作是将 Python 数据结构序列化为其它格式(XML/JSON 等等)\nclass UserRegSerializer(serializers.ModelSerializer):\n class Meta:\n model = UserProFile\n fields = ('nickName','avatarUrl','gender','password')" }, { "alpha_fraction": 0.6098953485488892, "alphanum_fraction": 0.6251189112663269, "avg_line_length": 24.634145736694336, "blob_id": "2b2186a0f73d5dfa2ed072e209d7376d363f103d", "content_id": "e4569545d587d0c3a131eedce2748962266de312", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1167, "license_type": "permissive", "max_line_length": 75, "num_lines": 41, "path": "/mini-NoteDemo/djagno后台/mynote/utils/weixin_util/weixin/lib/wxcrypt.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\n\n\"\"\"\n对小程序获取的用户信息解密代码.\n\"\"\"\nimport base64\nfrom Crypto.Cipher import AES\nimport json\n\n# from ..json_import import simplejson as json_import\n# from utils.weixin_util.weixin.json_import import simplejson as json\n\nclass WXBizDataCrypt:\n\n def __init__(self, appid, session_key):\n self.appid = appid\n self.session_key = session_key\n\n def decrypt(self, encrypted_data, iv):\n '''\n aes decode\n 将加密后的信息解密\n @param encrypted_data: 包括敏感数据在内的完整用户信息的加密数据\n @param iv: 加密算法的初始向量\n @return: 解密后数据\n '''\n session_key = base64.b64decode(self.session_key)\n encrypted_data = base64.b64decode(encrypted_data)\n iv = base64.b64decode(iv)\n\n cipher = AES.new(session_key, AES.MODE_CBC, iv)\n\n decrypted = json.loads(self._unpad(cipher.decrypt(encrypted_data)))\n\n if 
decrypted['watermark']['appid'] != self.appid:\n raise Exception('Invalid Buffer')\n\n return decrypted\n\n def _unpad(self, s):\n return s[:-ord(s[len(s)-1:])]\n" }, { "alpha_fraction": 0.7107843160629272, "alphanum_fraction": 0.7107843160629272, "avg_line_length": 17.545454025268555, "blob_id": "be9f73ad53558635377bc03c7e7335dba785afba", "content_id": "0adced6ea7f21c065f5f55b6791e92d3d23082b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 204, "license_type": "no_license", "max_line_length": 51, "num_lines": 11, "path": "/mini-NoteDemo/djagno后台/mynote/apps/note/serializers.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom apps.note.models import NoteModel\n\n\n\nclass NoteSerializers(serializers.ModelSerializer):\n\n class Meta:\n model = NoteModel\n fields = '__all__'\n" }, { "alpha_fraction": 0.6795454621315002, "alphanum_fraction": 0.6863636374473572, "avg_line_length": 22.210525512695312, "blob_id": "d9f1d722c022fc782dc9a8bd76691ddd30459d44", "content_id": "25c31875cdf50ade727e1862b948a6839ed7fbb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 460, "license_type": "no_license", "max_line_length": 68, "num_lines": 19, "path": "/mini-NoteDemo/djagno后台/mynote/apps/note/models.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "# Create your models here.\n\nfrom django.db import models\nfrom django.contrib.auth import get_user_model\n\nUSER = get_user_model()\n\n\nclass NoteModel(models.Model):\n\n user = models.ForeignKey(USER,verbose_name='用户',on_delete=False)\n content = models.TextField(max_length=500,verbose_name='便签内容')\n\n class Meta:\n verbose_name = '便签管理'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.content" }, { "alpha_fraction": 0.641791045665741, "alphanum_fraction": 0.641791045665741, "avg_line_length": 14, "blob_id": "adedbe283f974ffa9ab9313f7f645a25bd99f55a", "content_id": "cf647dd2147993f75993494732bc4a2f5191b7e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 134, "license_type": "no_license", "max_line_length": 33, "num_lines": 9, "path": "/mini-NoteDemo/小程序端/备忘录小程序/pages/index/utils/showToast.js", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "function showToast(title, icon) {\n wx.showToast({\n title: title,\n icon: icon\n })\n}\nmodule.exports = {\n showToast: showToast\n}" }, { "alpha_fraction": 0.5326704382896423, "alphanum_fraction": 0.6136363744735718, "avg_line_length": 28.33333396911621, "blob_id": "c1f8821678891ae0779247e9348212ee93a70345", "content_id": "d068730da97a120ceee050d1b1170a368ea58576", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 720, "license_type": "no_license", "max_line_length": 130, "num_lines": 24, "path": "/mini-NoteDemo/djagno后台/mynote/apps/users/migrations/0002_auto_20190227_1819.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1 on 2019-02-27 10:19\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='userprofile',\n name='add_time',\n field=models.DateTimeField(default=datetime.datetime(2019, 2, 27, 18, 19, 20, 552188), 
verbose_name='注册时间'),\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='user_idv',\n field=models.CharField(default='b0cf37e00de04af5a195a4f7e9b62d61', max_length=50, unique=True, verbose_name='用户唯一ID'),\n ),\n ]\n" }, { "alpha_fraction": 0.5975610017776489, "alphanum_fraction": 0.7439024448394775, "avg_line_length": 10.714285850524902, "blob_id": "8c17b59342c1cde6e803e34ffff68bb0ef2d8507", "content_id": "b6892926e276448d568bfa3ab4c52216fe5eb53b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 82, "license_type": "permissive", "max_line_length": 17, "num_lines": 7, "path": "/mini-NoteDemo/djagno后台/mynote/utils/weixin_util/requirements.txt", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "simplejson==3.6.3\nrequests==2.4.1\nchardet==2.3.0\nsix==1.8.0\nlxml\nxmltodict\npycrypto\n" }, { "alpha_fraction": 0.688524603843689, "alphanum_fraction": 0.688524603843689, "avg_line_length": 19.33333396911621, "blob_id": "872e746094e26d4a4472ecd6f7fa3429812338ea", "content_id": "5df751e06f02e17068c0a7572430b0968b64aff6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 85, "license_type": "no_license", "max_line_length": 22, "num_lines": 3, "path": "/mini-NoteDemo/djagno后台/mynote/mynote/sys_info.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "# Fill in your own WeChat mini program app id and secret here\nMINI_APP_ID = ''\nMINI_APP_SECRET = ''\n" }, { "alpha_fraction": 0.7739726305007935, "alphanum_fraction": 0.7780821919441223, "avg_line_length": 35.525001525878906, "blob_id": "4440ca943e72148250c55753de8dadc3f1c33f0a", "content_id": "e48360c96a3565b70b63897a50f8f2269ca47dd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1460, "license_type": "no_license", "max_line_length": 94, "num_lines": 40, "path": "/mini-NoteDemo/djagno后台/mynote/apps/note/views.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom rest_framework import views\nfrom rest_framework import authentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_jwt.authentication import JSONWebTokenAuthentication\nfrom utils.permissions import IsOwnerOrReadOnly\nfrom apps.note.models import NoteModel\nfrom note.serializers import NoteSerializers\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework import mixins\nfrom rest_framework import viewsets\n\n# Create your views here.\n\nclass UploadTextView(views.APIView):\n authentication_classes = (authentication.SessionAuthentication,JSONWebTokenAuthentication)\n permission_classes = (IsAuthenticated,IsOwnerOrReadOnly)\n\n def post(self,request):\n user = self.request.user\n content = request.data['content']\n\n note = NoteModel()\n note.user = user\n note.content = content\n note.save()\n return Response({'id':note.id},status=status.HTTP_200_OK)\n\nclass GetTextView(mixins.ListModelMixin,viewsets.GenericViewSet):\n\n authentication_classes = (authentication.SessionAuthentication,JSONWebTokenAuthentication)\n permission_classes = (IsAuthenticated,IsOwnerOrReadOnly)\n\n def get_queryset(self):\n query = NoteModel.objects.filter(user=self.request.user)\n return query\n serializer_class = NoteSerializers" }, { "alpha_fraction": 0.5895542502403259, "alphanum_fraction": 
0.5940206050872803, "avg_line_length": 37.49473571777344, "blob_id": "493f5c50bcbb1cb24e8a15f2cfdc3cce5ec9077c", "content_id": "eca279de96dbcb4439fd00fe7cf42ec717c610bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11657, "license_type": "no_license", "max_line_length": 109, "num_lines": 285, "path": "/mini-NoteDemo/djagno后台/mynote/apps/users/views.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "# coding=utf-8\n\nimport datetime\nfrom mynote.sys_info import MINI_APP_ID,MINI_APP_SECRET # import the mini program developer's app id and secret\nfrom apps.users.models import UserProFile\nfrom apps.users.Serializers import UserRegSerializer\nfrom rest_framework import mixins\nfrom rest_framework.mixins import CreateModelMixin\nfrom rest_framework import views,viewsets\nfrom rest_framework_jwt.authentication import JSONWebTokenAuthentication\nfrom rest_framework import authentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom utils.weixin_util.weixin import WXAPPAPI\nfrom utils.weixin_util.weixin.lib.wxcrypt import WXBizDataCrypt\nfrom rest_framework.response import Response\nfrom django.contrib.auth.hashers import make_password\nfrom rest_framework import status\nfrom rest_framework_jwt.views import JSONWebTokenAPIView\nfrom rest_framework_jwt.settings import api_settings\nfrom rest_framework_jwt.serializers import (\n JSONWebTokenSerializer\n)\nfrom utils.permissions import IsOwnerOrReadOnly\n\n\n\njwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER\n\nclass Registered(CreateModelMixin,mixins.UpdateModelMixin,mixins.RetrieveModelMixin,viewsets.GenericViewSet):\n\n serializer_class = UserRegSerializer\n queryset = UserProFile.objects.all()\n\n # authentication_classes = (authentication.SessionAuthentication,JSONWebTokenAuthentication) # authentication\n\n def get_permissions(self):\n if self.action =='retrieve':\n return [IsAuthenticated()]\n elif self.action =='create':\n return []\n return []\n\n def create(self, request, *args, **kwargs):\n api = WXAPPAPI(appid=MINI_APP_ID,app_secret=MINI_APP_SECRET)\n code = request.data['code'] # the login code from wx.login\n session_info = api.exchange_code_for_session_key(code=code)\n session_key = session_info.get('session_key')\n crypt = WXBizDataCrypt(MINI_APP_ID, session_key)\n encrypted_data = request.data['username'] # the encrypted user info\n iv = request.data['password'] # the AES initial vector\n user_info = crypt.decrypt(encrypted_data, iv) # decrypt the user's login info\n # pull out the user's profile fields\n openid = user_info['openId'] # openid\n avatarUrl = user_info['avatarUrl'] # avatar url\n country = user_info['country'] # country\n province = user_info['province'] # province\n city = user_info['city'] # city\n gender = user_info['gender'] # gender\n language = user_info['language'] # language\n nickName = user_info['nickName'] # nickname\n\n # # save the user's avatar locally\n # avatarPath = os.path.join(BASE_DIR, 'upload/UserProFilebg/avatar/')\n # avatarGet = requests.get(avatarUrl)\n # avatar_name = avatarPath + openid + '.png'\n # image = Image.open(BytesIO(avatarGet.content))\n # image.save(avatar_name)\n\n # check whether the user already exists\n if UserProFile.objects.filter(openid=openid):\n # refresh the existing user's WeChat nickname, avatar and gender;\n # QuerySet.update() takes the new values as keyword arguments\n UserProFile.objects.filter(openid=openid).update(\n nick_name=nickName, avatarUrl=avatarUrl, gender=str(gender))\n # this_user.avatar = 'avatar/' + openid + '.png'\n return Response(status=status.HTTP_400_BAD_REQUEST)\n else:\n # save the new user's info\n if len(nickName) > 6:\n nickName = nickName[0:6]\n user_info_save = 
UserProFile()\n user_info_save.openid = openid # the user's openid\n user_info_save.avatarUrl = avatarUrl # WeChat avatar url\n user_info_save.country = country # country\n user_info_save.province = province # province\n user_info_save.city = city # city\n # user_info_save.avatar = 'UserProFilebg/avatar/' + openid + '.png'\n user_info_save.gender = str(gender) # gender\n user_info_save.language = language # current language\n user_info_save.nick_name = nickName # WeChat nickname\n # user_info_save.name = nickName # the user's original display name\n user_info_save.username = openid # use the openid as the account username\n user_info_save.password = make_password(openid) # hash the openid as the password\n # user_info_save.zhong_jifen = 0\n user_info_save.save()\n\n return Response(status=status.HTTP_201_CREATED)\n\n def get_object(self): # overrides the generic view method\n '''\n :return:\n '''\n return self.request.user\n\n def perform_create(self, serializer):\n '''\n :param serializer:\n :return:\n '''\n return serializer.save()\n\n\n\n\n\nclass READJSONWebTokenAPIView(JSONWebTokenAPIView):\n \"\"\"\n API View that receives a POST with a user's username and password.\n\n Returns a JSON Web Token that can be used for authenticated requests.\n \"\"\"\n\n def get_serializer_context(self):\n \"\"\"\n Extra context provided to the serializer class.\n \"\"\"\n try:\n username = self.request.data\n\n api = WXAPPAPI(appid=MINI_APP_ID, app_secret=MINI_APP_SECRET)\n code = username['code'] # the login code\n session_info = api.exchange_code_for_session_key(code=code)\n session_key = session_info.get('session_key')\n crypt = WXBizDataCrypt(MINI_APP_ID, session_key)\n encrypted_data = username['username'] # the encrypted user info\n iv = username['password'] # the AES initial vector\n user_info = crypt.decrypt(encrypted_data, iv) # decrypt the user's login info\n\n # pull out the user's profile fields\n openid = user_info['openId'] # openid\n avatarUrl = user_info['avatarUrl'] # avatar url\n nickName = user_info['nickName'] # nickname\n # find the user and refresh their WeChat nickname and avatar\n this_user = UserProFile.objects.filter(openid=openid)\n\n\n if this_user:\n this_user = this_user[0]\n this_user.avatarUrl = avatarUrl\n this_user.nick_name = nickName\n # this_user.avatar = 'avatar/' + openid + '.png'\n this_user.save()\n\n username['username'] = openid\n username['password'] = openid\n del username['code']\n except:\n pass\n\n return {\n 'request': self.request,\n 'view': self,\n }\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n if serializer.is_valid():\n user = serializer.object.get('user') or request.user\n token = serializer.object.get('token')\n response_data = jwt_response_payload_handler(token, user, request)\n response = Response(response_data)\n if api_settings.JWT_AUTH_COOKIE:\n expiration = (datetime.datetime.utcnow() +\n api_settings.JWT_EXPIRATION_DELTA)\n response.set_cookie(api_settings.JWT_AUTH_COOKIE,\n token,\n expires=expiration,\n httponly=True)\n return response\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n\nclass ObtainJSONWebToken(READJSONWebTokenAPIView):\n \"\"\"\n API View that receives a POST with a user's username and password.\n\n Returns a JSON Web Token that can be used for authenticated requests.\n \"\"\"\n serializer_class = JSONWebTokenSerializer\n\n\n\n\n\n# class GetUser(views.APIView):\n# '''\n# get and update the user's profile info\n# '''\n# authentication_classes = (authentication.SessionAuthentication, JSONWebTokenAuthentication) # token auth\n# permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)\n#\n# def get(self, request):\n# '''\n# get the user's info\n# :param request:\n# :return:\n# '''\n# name = 
self.request.user.name\n# avatar = self.request.user.avatar\n# thesignature = self.request.user.thesignature\n# background = self.request.user.background\n# gender = self.request.user.gender\n# # birthay = self.request.user.birthay\n# nickName = self.request.user.nickName\n# # mobile = self.request.user.mobile\n# if gender == '1':\n# gender = '男'\n# else:\n# gender = '女'\n# user_info = {\n# 'name': name,\n# # 'avatar': IMAGES_URL + 'upload/' + str(avatar),\n# 'thesignature': thesignature,\n# 'gender': gender,\n# 'nickName': nickName,\n# # 'mobile': mobile,\n# # 'birthay': datetime.datetime.strftime(birthay, \"%Y-%m-%d\"),\n# # 'background': IMAGES_URL + 'upload/' + str(background)\n# }\n# return Response(user_info, status=status.HTTP_200_OK)\n#\n# def post(self, request, format=None):\n# '''\n# 修改用户个人信息\n# :param request:\n# :return:\n# '''\n# try:\n# type = request.data['types']\n# except:\n# type = None\n# # if (type != None) and (image_files != None):\n# if type == 'GHTX':\n# image_files = request.data['file']\n# self.request.user.avatar = image_files\n# self.request.user.save()\n# return Response(status=status.HTTP_200_OK)\n# elif type == 'GHBJ':\n# image_files = request.data['file']\n# self.request.user.background = image_files\n# self.request.user.save()\n# return Response(status=status.HTTP_200_OK)\n# elif type == 'GHXB':\n# self.request.user.gender = request.data['new_shengri']\n# self.request.user.save()\n# return Response(status=status.HTTP_200_OK)\n# elif type == 'GHSRI':\n# self.request.user.birthay = request.data['sr']\n# self.request.user.save()\n# return Response(status=status.HTTP_200_OK)\n# elif type == 'GHNAME':\n# name_all = UserProFile.objects.filter(name=request.data['new_name'])\n# if name_all:\n# return Response({'message': '昵称已存在'}, status=status.HTTP_202_ACCEPTED)\n# self.request.user.name = request.data['new_name']\n# self.request.user.save()\n# return Response({'message': '昵称更改成功'}, status=status.HTTP_200_OK)\n# elif type == 'GHPHONE':\n# phone_all = UserProFile.objects.filter(mobile=request.data['new_phone'])\n# if phone_all:\n# return Response({'message': '手机号已存在'}, status=status.HTTP_202_ACCEPTED)\n# self.request.user.mobile = request.data['new_phone']\n# self.request.user.save()\n# return Response({'message': '手机号已更换'}, status=status.HTTP_200_OK)\n# elif type == 'thesignature':\n# self.request.user.thesignature = request.data['new_thesignature']\n# self.request.user.save()\n# return Response({'message': '签名已更新'}, status=status.HTTP_200_OK)\n# return Response(status=status.HTTP_401_UNAUTHORIZED)\n" }, { "alpha_fraction": 0.6802030205726624, "alphanum_fraction": 0.6802030205726624, "avg_line_length": 13.142857551574707, "blob_id": "996b92757c6e2fadea773cebb78d38301d810d98", "content_id": "567d4edbe7c4bb4f44b6a2460ba8b27ea540a7f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 45, "num_lines": 14, "path": "/mini-NoteDemo/djagno后台/mynote/apps/note/adminx.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "import xadmin\nfrom apps.note.models import NoteModel\n\nclass NoteAdmin(object):\n '''\n 用户表显示\n '''\n\n list_display = ['user','content'] #后台显示类型\n\n\n\n\nxadmin.site.register(NoteModel,NoteAdmin)" }, { "alpha_fraction": 0.7346938848495483, "alphanum_fraction": 0.7346938848495483, "avg_line_length": 26.5625, "blob_id": "cdbe14ba77bd79b699350aaa3419f9abcb2da577", "content_id": "7e1fb491e56b65b6689058341a293d111eea7cea", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 445, "license_type": "no_license", "max_line_length": 72, "num_lines": 16, "path": "/mini-NoteDemo/djagno后台/mynote/apps/users/urls.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "from django.conf.urls import include\nfrom rest_framework.routers import DefaultRouter\nfrom django.urls import path\n\n\nfrom apps.users.views import Registered,ObtainJSONWebToken\n\n\nrouter = DefaultRouter()\nrouter.register(r'Registered', Registered, base_name='Registered') # 注册\n\nurlpatterns = [\n path('', include(router.urls)),\n path('login/',ObtainJSONWebToken.as_view()),\n # path('GetUser/',GetUser.as_view(),name='GetUser'),\n ]\n" }, { "alpha_fraction": 0.49857550859451294, "alphanum_fraction": 0.7015669345855713, "avg_line_length": 15.91566276550293, "blob_id": "74b34510d84fa40690610468130c81f0b27aef64", "content_id": "b57e30f244dc87572d6442b76e150debaea8b0f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1404, "license_type": "no_license", "max_line_length": 31, "num_lines": 83, "path": "/mini-NoteDemo/djagno后台/mynote/requirements.txt", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "appnope==0.1.0\nargh==0.26.2\nasgi-redis==1.4.3\nasgiref==1.1.2\nasn1crypto==0.24.0\nattrs==17.3.0\nautobahn==18.3.1\nAutomat==0.6.0\nbeautifulsoup4==4.6.0\nbleach==2.1.3\nbs4==0.0.1\ncertifi==2018.1.18\ncffi==1.11.2\nchannels==1.1.8\nchardet==3.0.4\nclick==6.7\ncloudinary==1.11.0\nconfigparser==3.5.0\nconstantly==15.1.0\ncoreapi==2.3.3\ncoreschema==0.0.4\ncoverage==4.4.2\ncycler==0.10.0\ndaphne==1.4.2\ndefusedxml==0.5.0\ndiff-match-patch==20181111\nDjango==2.1\ndjango-cors-headers==2.4.0\ndjango-crispy-forms==1.7.2\ndjango-filter==2.1.0\ndjango-formtools==2.1\ndjango-import-export==1.2.0\ndjango-redis==4.10.0\ndjangorestframework==3.9.1\ndjangorestframework-jwt==1.11.0\net-xmlfile==1.0.1\nfuture==0.17.1\nhtml5lib==1.0.1\nhttplib2==0.12.1\nhyperlink==17.3.1\nidna==2.6\nincremental==17.5.0\nitypes==1.1.0\njdcal==1.4\nJinja2==2.10\nkiwisolver==1.0.1\nMarkupSafe==1.0\nmatplotlib==3.0.2\nmock==2.0.0\nmsgpack-python==0.5.6\nNaked==0.1.31\nnumpy==1.16.1\nodfpy==1.4.0\nopenpyxl==2.6.0\npbr==3.1.1\nPillow==5.4.1\npsycopg2==2.7.7\npsycopg2-binary==2.7.7\npycparser==2.18\npycryptodome==3.7.3\nPyHamcrest==1.9.0\nPyJWT==1.7.1\nPyMySQL==0.9.3\npyparsing==2.3.1\npython-dateutil==2.8.0\npytz==2017.3\nPyYAML==3.12\nredis==2.10.6\nrequests==2.18.4\nreversion==0.2\nshellescape==3.4.1\nsix==1.11.0\ntablib==0.12.1\ntoml==0.10.0\nTwisted==18.9.0\ntxaio==2.9.0\nunicodecsv==0.14.1\nuritemplate==3.0.0\nurllib3==1.22\nwebencodings==0.5.1\nxlrd==1.2.0\nxlwt==1.3.0\nzope.interface==4.4.3\n" }, { "alpha_fraction": 0.6393728256225586, "alphanum_fraction": 0.6445993185043335, "avg_line_length": 37.266666412353516, "blob_id": "f35898d29b4d24b7ea3ae4ed3ff17b961f21a507", "content_id": "2caec11c54b5eb90d653766b13d6b7468b2c0a8b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 574, "license_type": "permissive", "max_line_length": 80, "num_lines": 15, "path": "/mini-NoteDemo/djagno后台/mynote/utils/weixin_util/setup.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom setuptools import setup, find_packages\n\nsetup(name=\"python-weixin\",\n version=\"0.2.3\",\n description=\"Python Weixin API client support 
wechat-app\",\n license=\"BSD\",\n install_requires=[\"simplejson\", \"requests\", \"six\", \"chardet\"],\n author=\"Zongxiao Cheng\",\n author_email=\"[email protected]\",\n url=\"https://github.com/gusibi/python-weixin\",\n download_url=\"https://github.com/gusibi/python-weixin/archive/master.zip\",\n packages=find_packages(),\n keywords=[\"python-weixin\", \"weixin\", \"wechat\", \"sdk\"],\n zip_safe=True)\n" }, { "alpha_fraction": 0.5017709732055664, "alphanum_fraction": 0.5017709732055664, "avg_line_length": 19.190475463867188, "blob_id": "9a46b215eacf685f16a6fd75943ae3818b6ac17a", "content_id": "615c8702d5b7d83b339759e0de7627397ed1bb65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 875, "license_type": "no_license", "max_line_length": 63, "num_lines": 42, "path": "/mini-NoteDemo/小程序端/备忘录小程序/pages/index/utils/request.js", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "const app = getApp()\n\n// 封装网络请求模块\nfunction Request(url, data = {}, method) {\n var url = url\n var data = data\n var method = method\n if (method == 'GET') {\n var get_data = new Promise(function(resolve, reject) {\n wx.request({\n url: url,\n header: {\n 'Authorization': 'JWT ' + app.globalData.jwt // 默认值\n },\n method: method,\n data: data,\n success: resolve\n })\n })\n return get_data\n }\n if (method == 'POST') {\n var post_data = new Promise(function (resolve, reject) {\n wx.request({\n url: url,\n header: {\n 'Authorization': 'JWT ' + app.globalData.jwt // 默认值\n },\n method: method,\n data:data,\n success: resolve\n })\n })\n return post_data\n }\n}\n\n\n\nmodule.exports = {\n request: Request,\n}" }, { "alpha_fraction": 0.4602169990539551, "alphanum_fraction": 0.47061482071876526, "avg_line_length": 25.656625747680664, "blob_id": "0157e70b323dceefe68ed336431863d0b8322a58", "content_id": "e9d7de62a0d5640500116a622dee38f6d99965f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5386, "license_type": "no_license", "max_line_length": 103, "num_lines": 166, "path": "/mini-NoteDemo/小程序端/备忘录小程序/pages/index/app.js", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "//app.js\nApp({\n onLaunch: function() {\n //调用API从本地缓存中获取数据\n var jwt = wx.getStorageSync('jwt');\n var that = this;\n if (!jwt) {\n //检查 jwt 是否存在 如果不存在调用登录\n console.log(\"1\")\n //jwt不存在调用login()函数\n that.login()\n } else {\n console.log(\"2\")\n //jsw存在\n this.globalData.jwt = jwt\n }\n },\n\n login: function (e) {\n var userinfo = e\n // 登录部分代码\n var that = this;\n wx.login({\n // 调用 login 获取 code\n success: function (res) {\n var code = res.code;\n console.log(code)\n console.log(\"login()获取code成功\")\n try {\n that.globalData.userInfo = userinfo.detail.userInfo;\n var encryptedData = userinfo.detail.encryptedData || 'encry';\n var iv = userinfo.detail.iv || 'iv';\n } catch (e) {\n console.log(\"userInfo为null\")\n return false\n }\n wx.request({ // 发送请求 获取 jwt\n url: \"http://127.0.0.1:8000/users/login/\",\n header: {\n Authorization: 'JWT' + that.globalData.access_token,\n },\n data: {\n username: encryptedData,\n password: iv,\n code: code,\n },\n method: \"POST\",\n success: function (res) {\n if (res.statusCode === 200) {\n // 得到 jwt 后存储到 storage,\n console.log(res.data.token)\n wx.showToast({\n title: '登录成功',\n icon: 'success'\n });\n wx.setStorage({\n key: \"jwt\",\n data: res.data.token\n });\n that.globalData = res.data\n that.globalData.jwt = res.data.token\n 
that.globalData.access_token = res.data.token;\n that.globalData.account_id = res.data.sub;\n } else if (res.statusCode === 400) {\n // 如果没有注册调用注册接口\n console.log(\"调用register()函数\")\n that.register(userinfo);\n } else {\n // 提示错误信息\n wx.showToast({\n title: res.data.text,\n icon: 'success',\n duration: 2000\n });\n }\n },\n fail: function (res) { }\n })\n }\n })\n\n },\n register: function (e) {\n // 注册代码\n var that = this;\n var userinfo = e\n wx.login({ // 调用登录接口获取 code\n success: function (res) {\n console.log(\"register()时获取code成功\")\n var code = res.code;\n try {\n that.globalData.userInfo = userinfo.detail.userInfo;\n var encryptedData = userinfo.detail.encryptedData || 'encry';\n var iv = userinfo.detail.iv || 'iv';\n } catch (e) {\n console.log('shibai')\n return false\n }\n wx.request({ // 请求注册用户接口\n url: \"http://127.0.0.1:8000/users/Registered/\",\n header: {\n // Authorization: config.basic_token\n },\n data: {\n username: encryptedData,\n password: iv,\n code: code,\n },\n method: \"POST\",\n success: function (res) {\n if (res.statusCode == 201) {\n console.log(\"@@@\")\n that.login(userinfo);\n }\n if (res.statusCode == 401) {\n console.log(\"!!!\") \n that.register(userinfo);\n }\n },\n fail: function (res) { \n console.log(\"?????\")\n }\n })\n\n }\n })\n\n },\n globalData: {\n userInfo: null,\n jwt:null,\n note: [{\n id: '1',\n content: '我们都知道,Django是一种基于Python的Web开发框架。',\n },\n {\n id: '2',\n content: '那么,什么是Web开发?Web开发指的是开发基于B/S架构,通过前后端的配合,将后台服务器的数据在浏览器上展现给前台用户的应用',\n },\n {\n id: '3',\n content: '比如将电子购物网站的商品数据在浏览器上展示给客户,在基于浏览器的学校系统管理平台上管理学生的数据,监控机房服务器的状态并将结果以图形化的形式展现出来等等。',\n },\n {\n id: '4',\n content: '使用Python开发Web应用,最简单、原始和直接的办法是使用CGI标准,在二十年前这种方式很流行。它是如何做的呢? ',\n },\n {\n id: '5',\n content: '服务过程是这样的:首先,用户请求CGI,脚本代码打印Content-Type行等一些HTML的起始标签,然后连接数据库并执行一些查询操作,获取最新的十件商品的相关数据',\n },\n {\n id: '6',\n content: '在遍历这些商品的同时,生成一个商品的HTML列表项,然后输出HTML的结束标签并且关闭数据库连接',\n },\n {\n id: '7',\n content: '如果应用中有多处需要连接数据库会怎样呢?每个独立的CGI脚本,不应该重复编写数据库连接相关的代码。',\n },\n {\n id: '8',\n content: '如果代码被重用到一个复合的环境中会发生什么?',\n },\n ],\n }\n})" }, { "alpha_fraction": 0.6642969846725464, "alphanum_fraction": 0.6872037649154663, "avg_line_length": 33.24324417114258, "blob_id": "1ddd9313daebe1607cf7bf593d0937f6fba84419", "content_id": "32002c62218ca72e877a757df5770c8d26d6a5ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1410, "license_type": "no_license", "max_line_length": 99, "num_lines": 37, "path": "/mini-NoteDemo/djagno后台/mynote/apps/users/models.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom datetime import datetime\nfrom uuid import uuid4\nfrom django.contrib.auth.models import AbstractUser\n\n# Create your models here.\n\nclass UserProFile(AbstractUser):\n\n '''\n 用户表\n '''\n\n Gender = {\n ('1','男'),\n ('2','女'),\n }\n\n openid = models.CharField(max_length=200,default='',verbose_name='用户微信唯一ID') #微信后台服务器提供唯一ID\n user_id = models.CharField(max_length=50,default=uuid4().hex,unique=True,verbose_name='用户唯一ID')\n\n nick_name = models.CharField(max_length=20,verbose_name='用户微信昵称')\n gender = models.CharField(max_length=10,choices=Gender,default='1',verbose_name='性别')\n avatarUrl = models.URLField(max_length=500,default='',verbose_name='用户微信头像url')\n country = models.CharField(max_length=100,default='',verbose_name='用户微信国家')\n province = models.CharField(max_length=100,default='',verbose_name='用户微信省份')\n city = 
models.CharField(max_length=100,default='',verbose_name='用户微信城市')\n    language = models.CharField(max_length=100,default='',verbose_name='用户微信语言')\n\n    add_time = models.DateTimeField(default=datetime.now(),verbose_name='注册时间')\n\n    class Meta:\n        verbose_name = '用户管理'\n        verbose_name_plural = verbose_name\n\n    def __str__(self):\n        return self.nick_name" }, { "alpha_fraction": 0.6949541568756104, "alphanum_fraction": 0.6949541568756104, "avg_line_length": 28.133333206176758, "blob_id": "c33fb9d711914924bbd7143212214867512db3c2", "content_id": "1b03dffeeaa29eb040327fb5c514a8553f542624", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 476, "license_type": "no_license", "max_line_length": 77, "num_lines": 15, "path": "/mini-NoteDemo/djagno后台/mynote/apps/users/adminx.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "import xadmin\nfrom apps.users.models import UserProFile\n\nclass UserProFileAdmin(object):\n    '''\n    用户表显示\n    '''\n\n    list_display = ['nick_name','gender','country','province','city'] #后台显示类型\n    search_fields = ['nick_name','gender','country','province','city'] #设置搜索\n    list_filter = ['nick_name','gender','country','province','city'] # 搜索过滤器\n\n\nxadmin.site.unregister(UserProFile)\nxadmin.site.register(UserProFile,UserProFileAdmin)" }, { "alpha_fraction": 0.8125, "alphanum_fraction": 0.8125, "avg_line_length": 23.88888931274414, "blob_id": "b7988e83d2250055aecb29889935f2063d5800a8", "content_id": "544ab3b0672bda6425b76312d27f50776e8ca7a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 388, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/README.md", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "# mini-NoteDemo\n微信小程序备忘录demo,Django+restframework作为后台\n\n## 概述\n+ 小程序端界面比较简陋,界面采用*瀑布流*展示备忘录内容。\n+ 后端采用Django框架,并且使用*restframework*作为小程序的API接口\n## 注意\n+ django后端所引用的包放在了requirements.txt文件中\n+ pip install -r requirements.txt 指令下载所需依赖\n" }, { "alpha_fraction": 0.5288865566253662, "alphanum_fraction": 0.5404411554336548, "avg_line_length": 14, "blob_id": "970e03f9a20d0b255ffc0701754a56a3301e3384", "content_id": "8f94d973bca500bc3a31abd1d84dcbc9efa29338", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2216, "license_type": "no_license", "max_line_length": 60, "num_lines": 127, "path": "/mini-NoteDemo/小程序端/备忘录小程序/pages/new/new.js", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "// pages/new/new.js\nconst app = getApp()\nvar accounts = app.globalData.note\nvar Request = require(\"../../utils/request.js\")\nvar showToast = require(\"../../utils/showToast.js\")\nPage({\n\n  /**\n   * 页面的初始数据\n   */\n  data: {\n    newItem: {\n      id: \"\",\n      content: \"\"\n    }\n\n  },\n  //保存数据函数\n  onSubmit: function (event) {\n    //赋值content\n    var item = this.data.newItem;\n    item.content = event.detail.value.content;\n    //赋值id\n    // var accounts = app.globalData.note;\n    console.log(accounts.length)\n    item.id = String(accounts.length + 1);\n    //赋值newItem\n    this.setData({\n      newItem: item\n    });\n\n\n\n\n\n    var data = {\n      content:this.data.newItem.content\n    }\n    var url = \"http://127.0.0.1:8000/note/upnote/\"\n    Request.request(url,data,\"POST\").then(function(Request){\n      if(Request.statusCode == 200){\n        showToast.showToast(\"发布成功\",\"success\")\n      }\n      if(Request.statusCode == 401){\n        showToast.showToast(\"发布失败\", \"none\")\n        \n      }\n\n      
},function(error){});\n\n\n\n\n\n //全局变量note增加上newItem数据\n app.globalData.note.push(this.data.newItem);\n //把新数据添加到首页来显示\n var pages = getCurrentPages()\n pages[pages.length - 2].setData({\n item: accounts\n })\n wx.showToast({\n title: '保存成功',\n });\n setTimeout(function () {\n wx.hideToast();\n wx.navigateBack({\n })\n }, 1000)\n },\n\n /**\n * 生命周期函数--监听页面加载\n */\n onLoad: function (options) {\n\n },\n\n /**\n * 生命周期函数--监听页面初次渲染完成\n */\n onReady: function () {\n\n },\n\n /**\n * 生命周期函数--监听页面显示\n */\n onShow: function () {\n\n },\n\n /**\n * 生命周期函数--监听页面隐藏\n */\n onHide: function () {\n\n },\n\n /**\n * 生命周期函数--监听页面卸载\n */\n onUnload: function () {\n\n },\n\n /**\n * 页面相关事件处理函数--监听用户下拉动作\n */\n onPullDownRefresh: function () {\n\n },\n\n /**\n * 页面上拉触底事件的处理函数\n */\n onReachBottom: function () {\n\n },\n\n /**\n * 用户点击右上角分享\n */\n onShareAppMessage: function () {\n\n }\n})" }, { "alpha_fraction": 0.5057361125946045, "alphanum_fraction": 0.576481819152832, "avg_line_length": 30.696969985961914, "blob_id": "39f411e9d85756b2f77e8080fbba4cb37ec625bc", "content_id": "8b3e670ecec06ffd3deb6646680a5d68026efee5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1070, "license_type": "no_license", "max_line_length": 130, "num_lines": 33, "path": "/mini-NoteDemo/djagno后台/mynote/apps/users/migrations/0003_auto_20190227_1823.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1 on 2019-02-27 10:23\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0002_auto_20190227_1819'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='userprofile',\n name='user_idv',\n ),\n migrations.AddField(\n model_name='userprofile',\n name='user_id',\n field=models.CharField(default='af772fde2b724341ad632e2bf3a7c2b9', max_length=50, unique=True, verbose_name='用户唯一ID'),\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='add_time',\n field=models.DateTimeField(default=datetime.datetime(2019, 2, 27, 18, 23, 51, 602801), verbose_name='注册时间'),\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='gender',\n field=models.CharField(choices=[('2', '女'), ('1', '男')], default='1', max_length=10, verbose_name='性别'),\n ),\n ]\n" }, { "alpha_fraction": 0.7447306513786316, "alphanum_fraction": 0.7447306513786316, "avg_line_length": 29.571428298950195, "blob_id": "55924258c442dcbf302a5fb8a0311c09d5ed48fe", "content_id": "182c5f975fd189c803da89467f6165e083804626", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 427, "license_type": "no_license", "max_line_length": 69, "num_lines": 14, "path": "/mini-NoteDemo/djagno后台/mynote/apps/note/urls.py", "repo_name": "jixin456/mini-NoteDemo", "src_encoding": "UTF-8", "text": "from rest_framework.routers import DefaultRouter\nfrom django.urls import path\nfrom apps.note.views import UploadTextView,GetTextView\nfrom django.conf.urls import include\n\nrouter = DefaultRouter()\n\n# router.register('upnote',UploadTextView,base_name='UploadTextView')\n\nurlpatterns = [\n # path('',include(router.urls)),\n path('upnote/',UploadTextView.as_view()),\n path('getnote/',GetTextView.as_view({'get': 'list'})),\n]" } ]
24
Geeorgee23/Socios_cooperativa_MVC
https://github.com/Geeorgee23/Socios_cooperativa_MVC
481ecafadc67c0f45f5972d3d67746a02bbe1f58
befd2d76b902838af8437cc9a90f5d45ef2b21c3
779d98659c65b5159d06110cc1b50fbcc9589f25
refs/heads/master
2023-04-13T16:36:39.975755
2021-04-27T11:25:29
2021-04-27T11:25:29
362,085,899
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5715543627738953, "alphanum_fraction": 0.5763980746269226, "avg_line_length": 27.337499618530273, "blob_id": "d4b1aac3ea5b440ef8ef57ea918f4b82f33c4f3d", "content_id": "4d6b55f16eb3cc6f715a9cacb968d97384660816", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2271, "license_type": "no_license", "max_line_length": 295, "num_lines": 80, "path": "/controlador.py", "repo_name": "Geeorgee23/Socios_cooperativa_MVC", "src_encoding": "UTF-8", "text": "from socios import Socios\n\n\nclass Controlador:\n\n def __init__(self):\n self.listaSocios={}\n self.productos = { 'Naranja':40,\n 'Oliva':10,\n 'Caqui':20 }\n\n\n def numSocios(self):\n return len(self.listaSocios)\n\n \n def addSocio(self,socio):\n if (socio.getIdSocio() not in self.listaSocios):\n self.listaSocios[socio.getIdSocio()]=socio\n return True\n\n return False\n\n\n def delSocio(self,id_socio):\n if id_socio in self.listaSocios:\n del self.listaSocios[id_socio]\n return True\n\n return False\n\n\n def listarSocios(self):\n lista=[]\n for clave,valor in self.listaSocios.items():\n lista.append(\"Id_socio: \"+clave+\"\\n\\tDni: \"+valor.getDni()+\"\\n\\tNombre: \"+valor.getNombre()+\"\\n\\tApellidos: \"+valor.getApellidos()+\"\\n\\tfecha: \"+valor.getFecha()+\"\\n\\tSaldo: \"+str(valor.getSaldo()))\n\n return lista\n\n\n def getProductos(self):\n lista=\"\"\n for i in self.productos:\n lista +=\"\\t\"+i+\"\\n\"\n \n return lista\n\n\n\n\n def addProducto(self,id_socio,producto,kilos):\n if id_socio in self.listaSocios:\n if producto in self.productos:\n self.listaSocios[id_socio].addProducto(producto,kilos)\n return True\n return False\n\n\n\n\n def actualizaSaldo(self,id_socio):\n saldo=0.0\n if id_socio in self.listaSocios:\n for clave,valor in self.listaSocios[id_socio].getRegistrosPendientes().items():\n saldo+= self.productos[clave] * float(valor)\n\n self.listaSocios[id_socio].actualizaSaldo(saldo)\n self.listaSocios[id_socio].delRegistros()\n return True\n\n return False\n\n\n def fichaSocio(self,id_socio):\n socio=\"\"\n if id_socio in self.listaSocios:\n for clave,valor in self.listaSocios.items():\n socio = (\"Id_socio: \"+clave+\"\\n\\tDni: \"+valor.getDni()+\"\\n\\tNombre: \"+valor.getNombre()+\"\\n\\tApellidos: \"+valor.getApellidos()+\"\\n\\tfecha: \"+valor.getFecha()+\"\\n\\tSaldo: \"+str( \"{:10.2f}\".format(valor.getSaldo()))+\"\\n\\tRegistros Pendientes: \"+str(valor.getRegistrosPendientes()))\n\n return socio\n\n\n\n\n" }, { "alpha_fraction": 0.5498866438865662, "alphanum_fraction": 0.5566893219947815, "avg_line_length": 20.504064559936523, "blob_id": "dba0492edad611c00d9d0009778881147a7d42ef", "content_id": "3101b6e8a7b0216da040e3361bf24921f84666ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2652, "license_type": "no_license", "max_line_length": 64, "num_lines": 123, "path": "/main.py", "repo_name": "Geeorgee23/Socios_cooperativa_MVC", "src_encoding": "UTF-8", "text": "from socios import Socios\nfrom controlador import Controlador\nfrom datetime import datetime\n\n\ncontrolador = Controlador()\n\n\nwhile True:\n\n print(\"Actualmente hay \",controlador.numSocios(),\" socios\")\n print(\"1.- Añadir Socio\")\n print(\"2.- Eliminar Socio\")\n print(\"3.- Listar Socios\")\n print(\"4.- Registrar Productos\")\n print(\"5.- Actualizar Saldo\")\n print(\"6.- Ficha de Socio\")\n print(\"7.- Salir\")\n\n\n while True:\n try:\n\n op=int(input(\"Introduce opción:\"))\n\n if op>=1 and 
op<=7:\n break\n else: \n print(\"Introduce un numero del 1 al 7!\")\n\n except ValueError:\n print(\"Introduce un numero!\")\n\n\n if op==7:\n break\n\n\n if op==1:\n print()\n id_socio=input(\"Introduce el id del socio: \")\n dni=input(\"Introduce el dni del socio: \")\n nombre=input(\"Introduce el nombre del socio: \")\n apellidos=input(\"Introduce los apellidos del socio: \")\n fecha= datetime.now()\n hoy = str(fecha.strftime(\"%d-%m-%Y\"))\n\n socio = Socios(id_socio,dni,nombre,apellidos,hoy)\n\n if controlador.addSocio(socio):\n print(\"Socio añadido correctamente!\")\n else:\n print(\"Error al añadir el socio!\")\n\n print()\n\n\n if op==2:\n print()\n id_socio=input(\"Introduce el id del socio a eliminar: \")\n\n\n if controlador.delSocio(id_socio):\n print(\"Socio eliminado correctamente!\")\n else:\n print(\"Error al eliminar el socio!\")\n\n print()\n\n\n if op ==3:\n print()\n print(\"Socios: \")\n\n for i in controlador.listarSocios():\n print(i)\n\n print()\n\n\n\n if op ==4:\n print()\n print(\"Registrando productos...\")\n id_socio=input(\"Introduce el id del socio: \")\n\n print(\"Productos:\")\n print(controlador.getProductos())\n producto=input(\"Introduce el nombre del producto: \")\n\n kilos=input(\"Introduce el numero de kilos: \")\n\n if controlador.addProducto(id_socio,producto,kilos):\n print(\"Producto añadido correctamente!\")\n else:\n print(\"Error al añadir el producto!\")\n\n print()\n\n if op ==5:\n print()\n\n id_socio=input(\"Introduce el id del socio: \")\n if controlador.actualizaSaldo(id_socio):\n print(\"Saldo actualizado correctamente!\")\n else:\n print(\"Error al actualizar saldo!\")\n\n\n\n print()\n\n\n if op==6:\n print()\n\n\n id_socio=input(\"Introduce el id del socio: \")\n\n print(controlador.fichaSocio(id_socio))\n\n\n print()\n\n" }, { "alpha_fraction": 0.6180757880210876, "alphanum_fraction": 0.6200194358825684, "avg_line_length": 21.2391300201416, "blob_id": "320e3a1cdc7e7dc79172bbf843498b9e5752a26d", "content_id": "b1f2e4ed5e54268e2c6fbc62d50815e2a77f94c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1029, "license_type": "no_license", "max_line_length": 59, "num_lines": 46, "path": "/socios.py", "repo_name": "Geeorgee23/Socios_cooperativa_MVC", "src_encoding": "UTF-8", "text": "class Socios:\n\n def __init__(self,id_socio,dni,nombre,apellidos,fecha):\n self.id_socio=id_socio\n self.dni=dni\n self.nombre=nombre\n self.apellidos=apellidos\n self.fecha=fecha\n self.saldo=0.0\n self.registrosPendientes={}\n\n\n def getIdSocio(self):\n return self.id_socio\n \n def getDni(self):\n return self.dni\n \n def getNombre(self):\n return self.nombre\n\n def getApellidos(self):\n return self.apellidos\n\n def getFecha(self):\n return self.fecha\n\n def getSaldo(self):\n return self.saldo\n\n def getRegistrosPendientes(self):\n return self.registrosPendientes\n\n def addProducto(self,producto,kilos):\n if producto in self.registrosPendientes:\n self.registrosPendientes[producto] += kilos\n else:\n self.registrosPendientes[producto] = kilos\n\n\n def actualizaSaldo(self,saldo):\n self.saldo+=saldo\n\n\n def delRegistros(self):\n self.registrosPendientes={}\n \n\n" } ]
3
GagarinFOX/git_project1
https://github.com/GagarinFOX/git_project1
80d1b6a25955168fae7c4423e58da4051d7bad41
8fd7883724286d3b921393db464443f71d82cd5a
f23f111edb2021a038a2918074dbd1a8f7a612be
refs/heads/master
2023-01-23T17:27:06.416989
2020-11-21T10:12:19
2020-11-21T10:12:19
314,781,273
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5161290168762207, "alphanum_fraction": 0.5161290168762207, "avg_line_length": 11.916666984558105, "blob_id": "6ae0e39f2bc8f64f78c8415a285ae459bd6d4f7e", "content_id": "65f8445f959d7c33e6edec5f1eb58f4ba822f586", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 155, "license_type": "no_license", "max_line_length": 27, "num_lines": 12, "path": "/progr.py", "repo_name": "GagarinFOX/git_project1", "src_encoding": "UTF-8", "text": "def main():\n print('hello git')\n print('hello again')\n\n\ndef hello():\n print(\"funktion hello\")\n\n\nif __name__ == '__main__':\n main()\n hello()\n" } ]
1
assassinen/hyperquant
https://github.com/assassinen/hyperquant
a106ccc64682662948663268f6f814a1f4eb7ecb
65dfb9746bc1ee5229dcce9216af7228f78fdcd5
40f745da8112d567667933396ecbebe0a33d87a4
refs/heads/master
2022-12-16T17:16:20.965518
2019-03-05T15:54:23
2019-03-05T15:54:23
173,971,965
0
2
Apache-2.0
2019-03-05T15:27:57
2019-03-05T15:54:30
2022-12-08T01:40:20
Python
[ { "alpha_fraction": 0.6359223127365112, "alphanum_fraction": 0.6359223127365112, "avg_line_length": 14.84615421295166, "blob_id": "8e038cc41289efeb5994dca5d0eaee429e2ab098", "content_id": "361ab3298e0e0678a65613111480e64b6c360df4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 206, "license_type": "permissive", "max_line_length": 38, "num_lines": 13, "path": "/Pipfile", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "[[source]]\nurl = \"https://pypi.python.org/simple\"\nverify_ssl = true\nname = \"pypi\"\n\n[packages]\npython-binance = \"*\"\nwebsocket-client = \"*\"\nrequests = \"*\"\nclickhouse-driver = \"*\"\ndjango = \"*\"\n\n[dev-packages]\n" }, { "alpha_fraction": 0.6311541199684143, "alphanum_fraction": 0.6388611793518066, "avg_line_length": 36.45992660522461, "blob_id": "0fa3691a403df087eb710870928b9024c3502091", "content_id": "c85599ab2c8a499a17ef5313a2217c2b5c9a6c08", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41131, "license_type": "permissive", "max_line_length": 120, "num_lines": 1098, "path": "/hyperquant/clients/tests/test_init.py", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "import logging\nimport time\nfrom datetime import datetime\nfrom unittest import TestCase\n\nfrom hyperquant.api import Sorting, Interval, OrderType, Direction\nfrom hyperquant.clients import Error, ErrorCode, ParamName, ProtocolConverter, \\\n Endpoint, DataObject, Order, OrderBook\nfrom hyperquant.clients.tests.utils import wait_for, AssertUtil, set_up_logging\nfrom hyperquant.clients.utils import create_ws_client, create_rest_client\n\nset_up_logging()\n\n\n# Converter\n\nclass TestConverter(TestCase):\n converter_class = ProtocolConverter\n\n def setUp(self):\n super().setUp()\n\n # def test_(self):\n # pass\n\n\n# Common client\n\nclass TestClient(TestCase):\n is_rest = None\n platform_id = None\n version = None\n\n is_sorting_supported = False\n testing_symbol = \"EOSETH\"\n testing_symbols = [\"EOSETH\", \"BNBBTC\"]\n wrong_symbol = \"XXXYYY\"\n\n client = None\n client_authed = None\n\n def setUp(self):\n self.skipIfBase()\n super().setUp()\n\n if self.is_rest:\n self.client = create_rest_client(self.platform_id, version=self.version)\n self.client_authed = create_rest_client(self.platform_id, True, self.version)\n else:\n self.client = create_ws_client(self.platform_id, version=self.version)\n self.client_authed = create_ws_client(self.platform_id, True, self.version)\n\n def tearDown(self):\n self.client.close()\n super().tearDown()\n\n def skipIfBase(self):\n if self.platform_id is None:\n self.skipTest(\"Skip base class\")\n\n # Utility\n\n def _result_info(self, result, sorting):\n is_asc_sorting = sorting == Sorting.ASCENDING\n items_info = \"%s first: %s last: %s sort-ok: %s \" % (\n \"ASC\" if is_asc_sorting else \"DESC\",\n self._str_item(result[0]) if result else \"-\",\n self._str_item(result[-1]) if result else \"-\",\n (result[0].timestamp < result[-1].timestamp if is_asc_sorting\n else result[0].timestamp > result[-1].timestamp) if result else \"-\")\n return items_info + \"count: %s\" % (len(result) if result else \"-\")\n\n def _str_item(self, item):\n # return str(item.item_id) + \" \" + str(item.timestamp / 100000)\n # return str(item.timestamp / 100000)\n dt = datetime.utcfromtimestamp(item.timestamp)\n return dt.isoformat()\n\n def assertRightSymbols(self, items):\n if 
self.testing_symbol:\n for item in items:\n # was: item.symbol = self.testing_symbol\n self.assertEqual(item.symbol, item.symbol.upper())\n self.assertEqual(item.symbol, self.testing_symbol)\n else:\n # For Trades in BitMEX\n symbols = set([item.symbol for item in items])\n self.assertGreater(len(symbols), 1)\n # self.assertGreater(len(symbols), 10)\n\n # (Assert items)\n\n def assertItemIsValid(self, trade, testing_symbol_or_symbols=None):\n if not testing_symbol_or_symbols:\n testing_symbol_or_symbols = self.testing_symbol\n\n AssertUtil.assertItemIsValid(self, trade, testing_symbol_or_symbols, self.platform_id)\n\n def assertTradeIsValid(self, trade, testing_symbol_or_symbols=None):\n if not testing_symbol_or_symbols:\n testing_symbol_or_symbols = self.testing_symbol\n\n AssertUtil.assertTradeIsValid(self, trade, testing_symbol_or_symbols, self.platform_id)\n\n def assertMyTradeIsValid(self, my_trade, testing_symbol_or_symbols=None):\n if not testing_symbol_or_symbols:\n testing_symbol_or_symbols = self.testing_symbol\n\n AssertUtil.assertMyTradeIsValid(self, my_trade, testing_symbol_or_symbols, self.platform_id)\n\n def assertCandleIsValid(self, candle, testing_symbol_or_symbols=None):\n if not testing_symbol_or_symbols:\n testing_symbol_or_symbols = self.testing_symbol\n\n AssertUtil.assertCandleIsValid(self, candle, testing_symbol_or_symbols, self.platform_id)\n\n def assertTickerIsValid(self, ticker, testing_symbol_or_symbols=None):\n # if not testing_symbol_or_symbols:\n # testing_symbol_or_symbols = self.testing_symbol\n\n AssertUtil.assertTickerIsValid(self, ticker, testing_symbol_or_symbols, self.platform_id)\n\n def assertOrderBookIsValid(self, order_book, testing_symbol_or_symbols=None):\n if not testing_symbol_or_symbols:\n testing_symbol_or_symbols = self.testing_symbol\n\n AssertUtil.assertOrderBookIsValid(self, order_book, testing_symbol_or_symbols, self.platform_id)\n\n def assertOrderBookDiffIsValid(self, order_book, testing_symbol_or_symbols=None):\n if not testing_symbol_or_symbols:\n testing_symbol_or_symbols = self.testing_symbol\n\n AssertUtil.assertOrderBookDiffIsValid(self, order_book, testing_symbol_or_symbols, self.platform_id)\n\n # def assertOrderBookItemIsValid(self, order_book_item, testing_symbol_or_symbols=None):\n # if not testing_symbol_or_symbols:\n # testing_symbol_or_symbols = self.testing_symbol\n #\n # AssertUtil.assertOrderBookItemIsValid(self, order_book_item, testing_symbol_or_symbols, self.platform_id)\n\n def assertAccountIsValid(self, account):\n AssertUtil.assertAccountIsValid(self, account, self.platform_id)\n\n def assertOrderIsValid(self, order, testing_symbol_or_symbols=None):\n if not testing_symbol_or_symbols:\n testing_symbol_or_symbols = self.testing_symbol\n\n AssertUtil.assertOrderIsValid(self, order, testing_symbol_or_symbols, self.platform_id)\n\n\n# REST\n\nclass BaseTestRESTClient(TestClient):\n is_rest = True\n\n # (If False then platform supposed to use its max_limit instead\n # of returning error when we send too big limit)\n has_limit_error = False\n is_symbol_case_sensitive = True\n\n is_rate_limit_error = False\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.is_rate_limit_error = False\n\n def setUp(self):\n self.skipIfRateLimit()\n super().setUp()\n\n def assertGoodResult(self, result, is_iterable=True, message=None):\n if isinstance(result, Error) and result.code == ErrorCode.RATE_LIMIT:\n self.__class__.is_rate_limit_error = True\n self.skipIfRateLimit()\n\n self.assertIsNotNone(result, message)\n 
self.assertNotIsInstance(result, Error, message or Error)\n if is_iterable:\n self.assertGreater(len(result), 0, message)\n\n def assertErrorResult(self, result, error_code_expected=None):\n if isinstance(result, Error) and result.code == ErrorCode.RATE_LIMIT:\n self.__class__.is_rate_limit_error = True\n self.skipIfRateLimit()\n\n self.assertIsNotNone(result)\n self.assertIsInstance(result, Error)\n if error_code_expected is not None:\n self.assertEqual(result.code, error_code_expected)\n\n def skipIfRateLimit(self):\n if self.__class__.is_rate_limit_error:\n self.skipTest(\"Rate limit reached for this platform. Try again later.\")\n\n\nclass TestRESTClient(BaseTestRESTClient):\n # Test all methods except history methods\n\n # (All numbers taken from https://api.binance.com/api/v1/exchangeInfo for EOSETH.\n # Define your dicts for other platforms in subclasses.)\n order_sell_limit_params = {\n ParamName.ORDER_TYPE: OrderType.LIMIT,\n ParamName.DIRECTION: Direction.SELL,\n # todo check to avoid problems\n ParamName.PRICE: \"0.22\",\n ParamName.AMOUNT: \"0.1\",\n }\n\n order_buy_market_params = {\n ParamName.ORDER_TYPE: OrderType.MARKET,\n ParamName.DIRECTION: Direction.BUY,\n # todo check to avoid problems\n # ParamName.PRICE: \"0.000001\", # no price for MARKET order\n ParamName.AMOUNT: \"0.01\",\n }\n\n order_sell_market_params = {\n ParamName.ORDER_TYPE: OrderType.MARKET,\n ParamName.DIRECTION: Direction.SELL,\n # todo check to avoid problems\n # ParamName.PRICE: \"0.000001\", # no price for MARKET order\n ParamName.AMOUNT: \"0.01\",\n }\n\n created_orders = None\n\n def tearDown(self):\n # Cancel all created orders\n if self.created_orders:\n for item in self.created_orders:\n self.client_authed.cancel_order(item)\n\n super().tearDown()\n\n # Simple methods\n\n def test_ping(self, is_auth=False):\n client = self.client_authed if is_auth else self.client\n\n result = client.ping()\n\n self.assertGoodResult(result, False)\n\n def test_get_server_timestamp(self, is_auth=False):\n client = self.client_authed if is_auth else self.client\n\n # With request\n client.use_milliseconds = True\n\n result0_ms = result = client.get_server_timestamp(is_refresh=True)\n\n self.assertGoodResult(result, False)\n self.assertGreater(result, 1500000000000)\n self.assertIsInstance(result, int)\n\n client.use_milliseconds = False\n\n result0_s = result = client.get_server_timestamp(is_refresh=True)\n\n self.assertGoodResult(result, False)\n self.assertGreater(result, 1500000000)\n self.assertLess(result, 15000000000)\n self.assertIsInstance(result, (int, float))\n\n # Cached\n client.use_milliseconds = True\n\n result = client.get_server_timestamp(is_refresh=False)\n\n self.assertGoodResult(result, False)\n self.assertGreater(result, 1500000000000)\n self.assertIsInstance(result, int)\n self.assertGreater(result, result0_ms)\n\n client.use_milliseconds = False\n\n result = client.get_server_timestamp(is_refresh=False)\n\n self.assertGoodResult(result, False)\n self.assertGreater(result, 1500000000)\n self.assertLess(result, 15000000000)\n self.assertIsInstance(result, (int, float))\n self.assertGreater(result, result0_s)\n\n def test_get_symbols(self, is_auth=False):\n client = self.client_authed if is_auth else self.client\n\n result = client.get_symbols()\n\n self.assertGoodResult(result)\n self.assertGreater(len(result), 1)\n self.assertGreater(len(result), 50)\n self.assertIsInstance(result[0], str)\n if self.testing_symbol:\n self.assertIn(self.testing_symbol, result)\n\n # fetch_trades\n\n def 
test_fetch_trades(self, method_name=\"fetch_trades\", is_auth=False):\n client = self.client_authed if is_auth else self.client\n\n result = getattr(client, method_name)(self.testing_symbol)\n\n self.assertGoodResult(result)\n self.assertGreater(len(result), 1)\n self.assertGreater(len(result), 50)\n self.assertTradeIsValid(result[0])\n for item in result:\n self.assertTradeIsValid(item)\n self.assertRightSymbols(result)\n\n def test_fetch_trades_errors(self, method_name=\"fetch_trades\", is_auth=False):\n client = self.client_authed if is_auth else self.client\n\n # Wrong symbol\n result = getattr(client, method_name)(self.wrong_symbol)\n\n self.assertIsNotNone(result)\n self.assertIsInstance(result, Error)\n self.assertEqual(result.code, ErrorCode.WRONG_SYMBOL)\n\n if self.is_symbol_case_sensitive:\n # Symbol in lower case as wrong symbol\n result = getattr(client, method_name)(self.testing_symbol.lower())\n\n self.assertIsNotNone(result)\n self.assertIsInstance(result, Error)\n self.assertTrue(result.code == ErrorCode.WRONG_SYMBOL or\n result.code == ErrorCode.WRONG_PARAM)\n\n def test_fetch_trades_limit(self, method_name=\"fetch_trades\", is_auth=False):\n client = self.client_authed if is_auth else self.client\n\n self.assertFalse(client.converter.is_use_max_limit)\n\n # Test limit\n self.assertFalse(client.use_milliseconds)\n # client.use_milliseconds = False\n result = getattr(client, method_name)(self.testing_symbol, 2)\n\n self.assertGoodResult(result)\n self.assertEqual(len(result), 2)\n # (Test use_milliseconds)\n self.assertLess(result[0].timestamp, time.time())\n\n # Test is_use_max_limit (with limit param)\n client.use_milliseconds = True\n client.converter.is_use_max_limit = True\n result = getattr(client, method_name)(self.testing_symbol, 2)\n\n self.assertGoodResult(result)\n self.assertEqual(len(result), 2)\n # (Test use_milliseconds)\n self.assertGreater(result[0].timestamp, time.time())\n\n # (Get default item count)\n result = getattr(client, method_name)(self.testing_symbol)\n self.assertGoodResult(result)\n default_item_count = len(result)\n\n # Test is_use_max_limit (without limit param)\n client.converter.is_use_max_limit = True\n result = getattr(client, method_name)(self.testing_symbol)\n\n self.assertGoodResult(result)\n self.assertGreaterEqual(len(result), default_item_count, \"Sometimes needs retry (for BitMEX, for example)\")\n for item in result:\n self.assertTradeIsValid(item)\n self.assertRightSymbols(result)\n\n def test_fetch_trades_limit_is_too_big(self, method_name=\"fetch_trades\", is_auth=False):\n client = self.client_authed if is_auth else self.client\n\n # Test limit is too big\n too_big_limit = 1000000\n result = getattr(client, method_name)(self.testing_symbol, too_big_limit)\n\n self.assertIsNotNone(result)\n if self.has_limit_error:\n self.assertIsInstance(result, Error)\n self.assertErrorResult(result, ErrorCode.WRONG_LIMIT)\n else:\n self.assertGoodResult(result)\n self.assertGreater(len(result), 10)\n self.assertLess(len(result), too_big_limit)\n for item in result:\n self.assertTradeIsValid(item)\n self.assertRightSymbols(result)\n max_limit_count = len(result)\n\n # Test is_use_max_limit uses the maximum possible limit\n client.converter.is_use_max_limit = True\n result = getattr(client, method_name)(self.testing_symbol)\n\n self.assertEqual(len(result), max_limit_count, \"is_use_max_limit doesn't work\")\n\n def test_fetch_trades_sorting(self, method_name=\"fetch_trades\", is_auth=False):\n if not self.is_sorting_supported:\n 
self.skipTest(\"Sorting is not supported by platform.\")\n\n client = self.client_authed if is_auth else self.client\n\n self.assertEqual(client.converter.sorting, Sorting.DESCENDING)\n\n # Test descending (default) sorting\n result = getattr(client, method_name)(self.testing_symbol)\n\n self.assertGoodResult(result)\n self.assertGreater(len(result), 2)\n self.assertGreater(result[0].timestamp, result[-1].timestamp)\n\n # Test ascending sorting\n client.converter.sorting = Sorting.ASCENDING\n result2 = getattr(client, method_name)(self.testing_symbol)\n\n self.assertGoodResult(result2)\n self.assertGreater(len(result2), 2)\n self.assertLess(result2[0].timestamp, result2[-1].timestamp)\n\n # (not necessary)\n # print(\"TEMP timestamps:\", result[0].timestamp, result[-1].timestamp)\n # print(\"TEMP timestamps:\", result2[0].timestamp, result2[-1].timestamp)\n # # Test that it is the same items for both sorting types\n # self.assertGreaterEqual(result2[0].timestamp, result[-1].timestamp)\n # self.assertGreaterEqual(result[0].timestamp, result2[-1].timestamp)\n # Test that interval of items sorted ascending is far before the interval of descending\n self.assertLess(result2[0].timestamp, result[-1].timestamp)\n self.assertLess(result2[0].timestamp, result[0].timestamp)\n\n # Other public methods\n\n def test_fetch_candles(self):\n client = self.client\n testing_interval = Interval.DAY_3\n\n # Error\n result = client.fetch_candles(None, None)\n\n self.assertErrorResult(result)\n\n result = client.fetch_candles(self.testing_symbol, None)\n\n self.assertErrorResult(result)\n\n # Good\n result = client.fetch_candles(self.testing_symbol, testing_interval)\n\n self.assertGoodResult(result)\n for item in result:\n self.assertCandleIsValid(item, self.testing_symbol)\n self.assertEqual(item.interval, testing_interval)\n\n # todo test from_, to_, and limit\n\n def test_fetch_ticker(self):\n client = self.client\n\n # Error\n\n # Good\n\n # Empty params\n result = client.fetch_ticker(None)\n\n self.assertGoodResult(result)\n self.assertGreater(len(result), 2)\n for item in result:\n self.assertTickerIsValid(item)\n\n # Full params\n result = client.fetch_ticker(self.testing_symbol)\n\n self.assertGoodResult(result, False)\n self.assertTickerIsValid(result, self.testing_symbol)\n\n def test_fetch_tickers(self):\n client = self.client\n\n # Error\n\n # Good\n\n # Empty params\n result = client.fetch_tickers()\n\n self.assertGoodResult(result)\n self.assertGreater(len(result), 2)\n for item in result:\n self.assertTickerIsValid(item)\n\n # Full params\n result = client.fetch_tickers(self.testing_symbols)\n\n self.assertGoodResult(result)\n self.assertEqual(len(result), len(self.testing_symbols))\n for item in result:\n self.assertTickerIsValid(item, self.testing_symbols)\n\n def test_fetch_order_book(self):\n client = self.client\n\n # Error\n\n # Empty params\n result = client.fetch_order_book()\n\n self.assertErrorResult(result)\n\n # Good\n\n # Full params\n result = client.fetch_order_book(self.testing_symbol)\n\n self.assertGoodResult(result, False)\n self.assertOrderBookIsValid(result)\n\n # todo test limit and is_use_max_limit\n\n # Private API methods\n\n def test_fetch_account_info(self):\n client = self.client_authed\n\n # Error\n\n # Good\n\n # Empty params # Full params\n result = client.fetch_account_info()\n\n self.assertGoodResult(result, is_iterable=False)\n self.assertAccountIsValid(result)\n\n def test_fetch_my_trades(self):\n client = self.client_authed\n\n # Error\n\n # Empty params\n 
result = client.fetch_my_trades(None)\n\n self.assertErrorResult(result)\n\n # Good\n\n # Full params\n result = client.fetch_my_trades(self.testing_symbol)\n\n NO_ITEMS_FOR_ACCOUNT = True\n self.assertGoodResult(result, not NO_ITEMS_FOR_ACCOUNT)\n for item in result:\n self.assertMyTradeIsValid(item, self.testing_symbols)\n\n # Limit\n result = client.fetch_my_trades(self.testing_symbol, 1)\n\n self.assertGoodResult(result, not NO_ITEMS_FOR_ACCOUNT)\n self.assertLessEqual(len(result), 1)\n\n result = client.fetch_my_trades(self.testing_symbol, 7)\n\n self.assertGoodResult(result, not NO_ITEMS_FOR_ACCOUNT)\n self.assertLessEqual(len(result), 7)\n if len(result) < 7:\n logging.warning(\"You have not enough my trades to test limit for sure.\")\n for item in result:\n self.assertMyTradeIsValid(item, self.testing_symbols)\n\n def test_create_order(self):\n client = self.client_authed\n\n # Error\n\n # Empty params\n result = client.create_order(None, None, None, None, None)\n\n self.assertErrorResult(result)\n\n # Good\n\n # Sell, limit\n result = client.create_order(self.testing_symbol, **self.order_sell_limit_params, is_test=True)\n\n self.assertGoodResult(result)\n cancel_result = client.cancel_order(result)\n\n self.assertOrderIsValid(result, self.testing_symbol)\n self.assertEqual(result.order_type, self.order_sell_limit_params.get(ParamName.ORDER_TYPE))\n self.assertEqual(result.direction, self.order_sell_limit_params.get(ParamName.DIRECTION))\n self.assertEqual(result.price, self.order_sell_limit_params.get(ParamName.PRICE))\n self.assertEqual(result.amount, self.order_sell_limit_params.get(ParamName.AMOUNT))\n self._check_canceled(cancel_result)\n\n IS_REAL_MONEY = True\n if IS_REAL_MONEY:\n return\n\n # Full params\n # Buy, market\n result = client.create_order(self.testing_symbol, **self.order_buy_market_params, is_test=True)\n\n self.assertGoodResult(result, is_iterable=False)\n cancel_result = client.cancel_order(result) # May be not already filled\n\n self.assertOrderIsValid(result, self.testing_symbol)\n self.assertEqual(result.order_type, self.order_buy_market_params.get(ParamName.ORDER_TYPE))\n self.assertEqual(result.direction, self.order_buy_market_params.get(ParamName.DIRECTION))\n self.assertEqual(result.price, self.order_buy_market_params.get(ParamName.PRICE))\n self.assertEqual(result.amount, self.order_buy_market_params.get(ParamName.AMOUNT))\n self._check_canceled(cancel_result)\n\n # Sell, market - to revert buy-market order\n result = client.create_order(self.testing_symbol, **self.order_sell_market_params, is_test=True)\n\n self.assertGoodResult(result, is_iterable=False)\n cancel_result = client.cancel_order(result)\n\n self.assertOrderIsValid(result, self.testing_symbol)\n self.assertEqual(result.order_type, self.order_sell_market_params.get(ParamName.ORDER_TYPE))\n self.assertEqual(result.direction, self.order_sell_market_params.get(ParamName.DIRECTION))\n self.assertEqual(result.price, self.order_sell_market_params.get(ParamName.PRICE))\n self.assertEqual(result.amount, self.order_sell_market_params.get(ParamName.AMOUNT))\n self._check_canceled(cancel_result)\n\n def _create_order(self):\n client = self.client_authed\n\n order = client.create_order(self.testing_symbol, **self.order_sell_limit_params, is_test=False)\n\n self.assertOrderIsValid(order)\n # Add for canceling in tearDown\n if not self.created_orders:\n self.created_orders = []\n self.created_orders.append(order)\n\n return order\n\n def _check_canceled(self, cancel_result):\n 
self.assertGoodResult(cancel_result, False, \"IMPORTANT! Order was created during tests, but not canceled!\")\n\n def assertCanceledOrder(self, order, symbol, item_id):\n self.assertItemIsValid(order, symbol)\n self.assertIsInstance(order, Order)\n self.assertEqual(order.item_id, item_id)\n\n def test_cancel_order(self):\n client = self.client_authed\n\n # Error\n\n # Empty params\n result = client.cancel_order(None)\n\n self.assertErrorResult(result)\n\n # Good\n\n # Full params\n order = self._create_order()\n result = client.cancel_order(order, \"some\")\n\n self._check_canceled(result)\n # self.assertGoodResult(result)\n self.assertNotEqual(result, order)\n self.assertCanceledOrder(result, order.symbol, order.item_id)\n\n # Same by item_id and symbol\n order = self._create_order()\n result = client.cancel_order(order.item_id, order.symbol)\n\n self._check_canceled(result)\n # self.assertGoodResult(result)\n self.assertIsNot(result, order)\n self.assertEqual(result, order)\n # self.assertNotEqual(result, order)\n self.assertOrderIsValid(result)\n self.assertCanceledOrder(result, order.symbol, order.item_id)\n\n def test_check_order(self):\n client = self.client_authed\n\n # Error\n\n # Empty params\n result = client.check_order(None)\n\n self.assertErrorResult(result)\n\n # temp\n result = client.check_order(\"someid\", \"somesymb\")\n # Good\n\n # Full params\n order = self._create_order()\n result = client.check_order(order, \"some\")\n\n self.assertGoodResult(result)\n self.assertEqual(order, result)\n self.assertOrderIsValid(result)\n\n # Same by item_id and symbol\n result = client.check_order(order.item_id, order.symbol)\n\n self.assertGoodResult(result)\n self.assertEqual(order, result)\n self.assertOrderIsValid(result)\n\n cancel_result = client.cancel_order(order)\n self._check_canceled(cancel_result)\n\n def test_fetch_orders(self):\n client = self.client_authed\n\n # Error\n\n # Good\n order = None\n order = self._create_order()\n\n # Empty params\n # Commented because for Binance it has weight 40\n # result = client.fetch_orders()\n #\n # self.assertGoodResult(result)\n # self.assertGreater(len(result), 0)\n # for item in result:\n # self.assertOrderIsValid(item)\n\n # All\n result = client.fetch_orders(self.testing_symbol, is_open=False)\n\n self.assertGoodResult(result)\n # self.assertGreater(len(result), 0)\n for item in result:\n self.assertOrderIsValid(item)\n\n # Full params\n result = client.fetch_orders(self.testing_symbol, is_open=True)\n\n self.assertGoodResult(result)\n # self.assertGreater(len(result), 0)\n for item in result:\n self.assertOrderIsValid(item)\n\n cancel_result = client.cancel_order(order)\n self._check_canceled(cancel_result)\n\n # All (all open are closed)\n result = client.fetch_orders(self.testing_symbol, is_open=False)\n\n self.assertGoodResult(result)\n self.assertGreater(len(result), 0)\n for item in result:\n self.assertOrderIsValid(item)\n\n # todo test also limit and from_item (and to_item? 
- for binance) for is_open=false\n\n\nclass TestRESTClientHistory(BaseTestRESTClient):\n # Test only history methods\n\n is_pagination_supported = True\n is_to_item_supported = True\n is_to_item_by_id = False\n\n # fetch_history\n\n def test_fetch_history_from_and_to_item(self, endpoint=Endpoint.TRADE, is_auth=True,\n timestamp_param=ParamName.TIMESTAMP):\n client = self.client_authed if is_auth else self.client\n\n # Limit must be greater than max items with same timestamp (greater than 10 at least)\n limit = 50\n\n # (Get items to be used to set from_item, to_item params)\n result0 = result = client.fetch_history(endpoint, self.testing_symbol,\n sorting=Sorting.DESCENDING, limit=limit)\n\n # print(\"\\n#0\", len(result), result)\n self.assertGoodResult(result)\n self.assertGreater(len(result), 2)\n if client.converter.IS_SORTING_ENABLED:\n self.assertGreater(result[0].timestamp, result[-1].timestamp)\n\n # Test FROM_ITEM and TO_ITEM\n result = client.fetch_history(endpoint, self.testing_symbol,\n sorting=Sorting.DESCENDING, # limit=limit,\n from_item=result0[0], to_item=result0[-1])\n\n # print(\"\\n#1\", len(result), result)\n self.assertGoodResult(result)\n self.assertGreater(len(result), 2)\n self.assertIn(result[0], result0, \"Try restart tests.\")\n # self.assertIn(result[-10], result0, \"Try restart tests.\")\n if self.is_to_item_supported:\n self.assertIn(result[-1], result0, \"Try restart tests.\")\n # self.assertEqual(len(result), len(result0))\n # self.assertEqual(result, result0)\n\n # Test FROM_ITEM and TO_ITEM in wrong order\n result = client.fetch_history(endpoint, self.testing_symbol,\n sorting=Sorting.DESCENDING, # limit=limit,\n from_item=result0[-1], to_item=result0[0])\n\n # print(\"\\n#2\", len(result), result)\n self.assertGoodResult(result)\n self.assertGreater(len(result), 2)\n self.assertIn(result[0], result0, \"Try restart tests.\")\n # self.assertIn(result[-10], result0, \"Try restart tests.\")\n if self.is_to_item_supported:\n self.assertIn(result[-1], result0, \"Try restart tests.\")\n # self.assertEqual(len(result), len(result0))\n # self.assertEqual(result, result0)\n\n # Test FROM_ITEM and TO_ITEM in wrong order and sorted differently\n result = client.fetch_history(endpoint, self.testing_symbol,\n sorting=Sorting.ASCENDING, # limit=limit,\n from_item=result0[-1], to_item=result0[0])\n\n # print(\"\\n#3\", len(result), result)\n self.assertGoodResult(result)\n self.assertGreater(len(result), 2)\n self.assertIn(result[0], result0, \"Try restart tests.\")\n # self.assertIn(result[-10], result0, \"Try restart tests.\")\n if self.is_to_item_supported:\n self.assertIn(result[-1], result0, \"Try restart tests.\")\n # self.assertEqual(len(result), len(result0))\n # self.assertEqual(result, result0)\n\n def test_fetch_history_with_all_params(self, endpoint=Endpoint.TRADE, is_auth=True,\n timestamp_param=ParamName.TIMESTAMP):\n client = self.client_authed if is_auth else self.client\n\n # (Get items to be used to set from_item, to_item params)\n # Test SYMBOL and LIMIT\n self.assertEqual(client.converter.sorting, Sorting.DESCENDING)\n limit = 10\n result = client.fetch_history(endpoint, self.testing_symbol, limit)\n\n self.assertGoodResult(result)\n self.assertEqual(len(result), limit)\n if client.converter.IS_SORTING_ENABLED:\n self.assertGreater(result[0].timestamp, result[-1].timestamp)\n # print(\"TEMP result\", result)\n\n # Test FROM_ITEM and TO_ITEM\n from_item = result[1]\n to_item = result[-2]\n print(\"Get history from_item:\", from_item, \"to_item:\", 
to_item)\n result = client.fetch_history(endpoint, self.testing_symbol,\n from_item=from_item, to_item=to_item)\n\n # print(\"TEMP result:\", result)\n self.assertGoodResult(result)\n if self.is_to_item_supported:\n if self.is_to_item_by_id:\n self.assertEqual(len(result), limit - 2)\n self.assertEqual(result[-1].timestamp, to_item.timestamp)\n\n # Test SORTING, get default_result_len\n result = client.fetch_history(endpoint, self.testing_symbol,\n sorting=Sorting.ASCENDING)\n\n self.assertGoodResult(result)\n self.assertGreater(len(result), limit)\n if client.converter.IS_SORTING_ENABLED:\n self.assertLess(result[0].timestamp, result[-1].timestamp)\n default_result_len = len(result)\n\n # Test IS_USE_MAX_LIMIT\n result = client.fetch_history(endpoint, self.testing_symbol,\n is_use_max_limit=True)\n\n self.assertGoodResult(result)\n self.assertGreaterEqual(len(result), default_result_len)\n\n # Test SYMBOL param as a list\n if self.testing_symbol:\n # (Note: for Binance fetch_history(endpoint, [\"some\", \"some\"])\n # sends request without 2 SYMBOL get params which cases error.)\n # (Note: for BitMEX fetch_history(endpoint, [None, None])\n # sends request without SYMBOL get param which is usual request - so skip here.)\n result = client.fetch_history(endpoint, [self.testing_symbol, self.testing_symbol])\n\n self.assertIsNotNone(result)\n # (Bitfinex returns [] on such error)\n if result:\n self.assertErrorResult(result)\n\n # fetch_trades_history\n\n test_fetch_trades = TestRESTClient.test_fetch_trades\n test_fetch_trades_errors = TestRESTClient.test_fetch_trades_errors\n test_fetch_trades_limit = TestRESTClient.test_fetch_trades_limit\n test_fetch_trades_limit_is_too_big = TestRESTClient.test_fetch_trades_limit_is_too_big\n test_fetch_trades_sorting = TestRESTClient.test_fetch_trades_sorting\n\n def test_fetch_trades_history(self):\n self.test_fetch_trades(\"fetch_trades_history\")\n\n def test_fetch_trades_history_errors(self):\n self.test_fetch_trades_errors(\"fetch_trades_history\")\n\n def test_fetch_trades_history_limit(self):\n self.test_fetch_trades_limit(\"fetch_trades_history\")\n\n def test_fetch_trades_history_limit_is_too_big(self):\n self.test_fetch_trades_limit_is_too_big(\"fetch_trades_history\")\n\n def test_fetch_trades_history_sorting(self):\n self.test_fetch_trades_sorting(\"fetch_trades_history\")\n\n def test_fetch_trades_is_same_as_first_history(self):\n result = self.client_authed.fetch_trades(self.testing_symbol)\n result_history = self.client_authed.fetch_trades_history(self.testing_symbol)\n\n self.assertNotIsInstance(result, Error)\n self.assertGreater(len(result), 10)\n # self.assertIn(result_history[0], result, \"Try restart\")\n self.assertIn(result_history[10], result, \"Try restart\")\n self.assertIn(result[-1], result_history)\n self.assertEqual(result, result_history,\n \"Can fail sometimes due to item added between requests\")\n\n def test_fetch_trades_history_over_and_over(self, sorting=None):\n if not self.is_pagination_supported:\n self.skipTest(\"Pagination is not supported by current platform version.\")\n\n if self.is_sorting_supported and not sorting:\n self.test_fetch_trades_history_over_and_over(Sorting.DESCENDING)\n self.test_fetch_trades_history_over_and_over(Sorting.ASCENDING)\n return\n\n client = self.client_authed\n client.converter.is_use_max_limit = True\n\n print(\"Test trade paging with\",\n \"sorting: \" + sorting if sorting else \"default_sorting: \" + client.default_sorting)\n if not sorting:\n sorting = client.default_sorting\n\n 
result = client.fetch_trades(self.testing_symbol, sorting=sorting)\n self.assertGoodResult(result)\n page_count = 1\n print(\"Page:\", page_count, self._result_info(result, sorting))\n\n while result and not isinstance(result, Error):\n prev_result = result\n result = client.fetch_trades_history(self.testing_symbol, sorting=sorting, from_item=result[-1])\n page_count += 1\n self.assertGoodResult(result)\n if isinstance(result, Error):\n # Rate limit error!\n print(\"Page:\", page_count, \"error:\", result)\n else:\n # Check next page\n print(\"Page:\", page_count, self._result_info(result, sorting))\n self.assertGreater(len(result), 2)\n for item in result:\n self.assertTradeIsValid(item)\n self.assertRightSymbols(result)\n if sorting == Sorting.ASCENDING:\n # Oldest first\n self.assertLess(prev_result[0].timestamp, prev_result[-1].timestamp,\n \"Error in sorting\") # Check sorting is ok\n self.assertLess(result[0].timestamp, result[-1].timestamp,\n \"Error in sorting\") # Check sorting is ok\n self.assertLessEqual(prev_result[-1].timestamp, result[0].timestamp,\n \"Error in paging\") # Check next page\n else:\n # Newest first\n self.assertGreater(prev_result[0].timestamp, prev_result[-1].timestamp,\n \"Error in sorting\") # Check sorting is ok\n self.assertGreater(result[0].timestamp, result[-1].timestamp,\n \"Error in sorting\") # Check sorting is ok\n self.assertGreaterEqual(prev_result[-1].timestamp, result[0].timestamp,\n \"Error in paging\") # Check next page\n\n if page_count > 2:\n print(\"Break to prevent RATE_LIMIT error.\")\n break\n\n print(\"Pages count:\", page_count)\n\n # For debugging only\n def test_just_logging_for_paging(self, method_name=\"fetch_trades_history\", is_auth=False, sorting=None):\n if self.is_sorting_supported and not sorting:\n self.test_just_logging_for_paging(method_name, is_auth, Sorting.DESCENDING)\n self.test_just_logging_for_paging(method_name, is_auth, Sorting.ASCENDING)\n return\n\n client = self.client_authed if is_auth else self.client\n print(\"Logging paging with\",\n \"sorting: \" + sorting if sorting else \"default_sorting: \" + client.converter.default_sorting)\n if not sorting:\n sorting = client.converter.default_sorting\n\n print(\"\\n==First page==\")\n result0 = result = getattr(client, method_name)(self.testing_symbol, sorting=sorting)\n\n self.assertGoodResult(result)\n print(\"_result_info:\", self._result_info(result, sorting))\n\n print(\"\\n==Next page==\")\n # print(\"\\nXXX\", result0[-1].timestamp)\n # result0[-1].timestamp -= 100\n # print(\"\\nXXX\", result0[-1].timestamp)\n result = getattr(client, method_name)(self.testing_symbol, sorting=sorting, from_item=result0[-1])\n # print(\"\\nXXX\", result0[0].timestamp, result0[-1].timestamp)\n # print(\"\\nYYY\", result[0].timestamp, result[-1].timestamp)\n\n if result:\n # To check rate limit error\n self.assertGoodResult(result)\n print(\"_result_info:\", self._result_info(result, sorting))\n\n print(\"\\n==Failed page==\")\n result = getattr(client, method_name)(self.testing_symbol, sorting=sorting, from_item=result0[0])\n\n self.assertGoodResult(result)\n print(\"_result_info:\", self._result_info(result, sorting))\n\n\n# WebSocket\n\nclass TestWSClient(TestClient):\n is_rest = False\n\n testing_symbols = [\"ETHBTC\", \"BTXUSD\"]\n received_items = None\n\n def setUp(self):\n self.skipIfBase()\n\n super().setUp()\n self.received_items = []\n\n def on_item_received(item):\n if isinstance(item, DataObject):\n self.received_items.append(item)\n\n self.client.on_item_received = 
on_item_received\n self.client_authed.on_item_received = on_item_received\n\n def test_trade_1_channel(self):\n self._test_endpoint_channels([Endpoint.TRADE], [self.testing_symbol], self.assertTradeIsValid)\n\n def test_trade_2_channel(self):\n self._test_endpoint_channels([Endpoint.TRADE], self.testing_symbols, self.assertTradeIsValid)\n\n def test_candle_1_channel(self):\n params = {ParamName.INTERVAL: Interval.MIN_1}\n self._test_endpoint_channels([Endpoint.CANDLE], [self.testing_symbol], self.assertCandleIsValid, params)\n\n def test_candle_2_channel(self):\n params = {ParamName.INTERVAL: Interval.MIN_1}\n self._test_endpoint_channels([Endpoint.CANDLE], self.testing_symbols, self.assertCandleIsValid, params)\n\n def test_ticker1_channel(self):\n self._test_endpoint_channels([Endpoint.TICKER], [self.testing_symbol], self.assertTickerIsValid)\n\n def test_ticker2_channel(self):\n self._test_endpoint_channels([Endpoint.TICKER], self.testing_symbols, self.assertTickerIsValid)\n\n def test_ticker_all_channel(self):\n self._test_endpoint_channels([Endpoint.TICKER_ALL], None, self.assertTickerIsValid)\n\n def test_order_book_1_channel(self):\n params = {ParamName.LEVEL: 5}\n self._test_endpoint_channels([Endpoint.ORDER_BOOK], [self.testing_symbol], self.assertOrderBookIsValid, params)\n\n def test_order_book_2_channel(self):\n params = {ParamName.LEVEL: 5}\n self._test_endpoint_channels([Endpoint.ORDER_BOOK], self.testing_symbols, self.assertOrderBookIsValid, params)\n\n def test_order_book_diff_1_channel(self):\n self._test_endpoint_channels([Endpoint.ORDER_BOOK_DIFF], [self.testing_symbol], self.assertOrderBookDiffIsValid)\n\n def test_order_book_diff_2_channel(self):\n self._test_endpoint_channels([Endpoint.ORDER_BOOK_DIFF], self.testing_symbols, self.assertOrderBookDiffIsValid)\n\n def _test_endpoint_channels(self, endpoints, symbols, assertIsValidFun, params=None, is_auth=False):\n client = self.client_authed if is_auth else self.client\n\n if not isinstance(endpoints, (list, tuple)):\n endpoints = [endpoints]\n if symbols and not isinstance(symbols, (list, tuple)):\n symbols = [symbols]\n\n client.subscribe(endpoints, symbols, **params or {})\n\n # todo wait for all endpoints and all symbols?\n wait_for(self.received_items, timeout_sec=10000000)\n\n self.assertGreaterEqual(len(self.received_items), 1)\n for item in self.received_items:\n assertIsValidFun(item, symbols)\n" }, { "alpha_fraction": 0.6496784687042236, "alphanum_fraction": 0.6552196145057678, "avg_line_length": 40.76571273803711, "blob_id": "d3cd8c29e0b84cfb538327f1b4341dee36fea758", "content_id": "0343129727d4e8c70e3b43f4fe9383e5618fb0ea", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14618, "license_type": "permissive", "max_line_length": 140, "num_lines": 350, "path": "/hyperquant/clients/tests/utils.py", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "import logging\nimport sys\nimport time\nfrom unittest import TestCase\n\nfrom hyperquant.api import OrderStatus, Direction, OrderType, OrderBookDirection, Interval\nfrom hyperquant.clients import Trade, ItemObject, Candle, MyTrade, Ticker, Order, OrderBookItem, OrderBook, Account, \\\n Balance\n\n\n# Utility\n\ndef set_up_logging(is_debug=True):\n logging_format = \"%(asctime)s %(levelname)s:%(name)s: %(message)s\"\n logging.basicConfig(level=logging.DEBUG if is_debug else logging.INFO,\n stream=sys.stdout, format=logging_format)\n\n\ndef wait_for(value_or_callable, count=2, 
timeout_sec=10):\n # Wait until value has \"count\" length or \"timeout_sec\" has elapsed.\n start_time = time.time()\n value, fun = (None, value_or_callable) if callable(value_or_callable) \\\n else (value_or_callable, None)\n print(\"\\n### Waiting a list for count: %s or timeout_sec: %s\" % (count, timeout_sec))\n while not timeout_sec or time.time() - start_time < timeout_sec:\n if fun:\n value = fun()\n if isinstance(value, bool):\n if value:\n print(\"\\n### Result is true: %s in %s seconds\" % (value, time.time() - start_time))\n return\n else:\n value_count = value if isinstance(value, int) else len(value)\n if value_count >= count:\n print(\"\\n### Count reached: %s of %s in %s seconds\" % (value_count, count, time.time() - start_time))\n return\n print(\"\\n### Sleep... current count: %s of %s, %s seconds passed\" % (value_count, count, time.time() - start_time))\n time.sleep(min(1, timeout_sec / 10) if timeout_sec else 1)\n print(\"\\n### Time is out! (value)\")\n raise Exception(\"Time is out!\")\n\n\ndef wait_for_history(history_connector, timeout_sec=10):\n # Wait until history_connector finishes or \"timeout_sec\" has elapsed.\n start_time = time.time()\n print(\"\\n### Waiting a history_connector or timeout_sec: %s\" % (timeout_sec))\n while not timeout_sec or time.time() - start_time < timeout_sec:\n if not history_connector.is_in_progress:\n if history_connector.is_complete:\n print(\"\\n### All (or no) history retrieved in: %s seconds\" % (time.time() - start_time))\n else:\n print(\"\\n### All history closed complete. Worked: %s seconds\" % (time.time() - start_time))\n return True\n time.sleep(min(3, timeout_sec / 10) if timeout_sec else 1)\n print(\"\\n### Time is out! (history_connector)\")\n raise Exception(\"Time is out!\")\n # return False\n\n\nclass AssertUtil(TestCase):\n # Don't extend this class; use its functions in your test classes\n\n def assertItemIsValid(self, item, testing_symbol_or_symbols=None, platform_id=None,\n is_with_item_id=True, is_with_timestamp=True):\n self.assertIsNotNone(item)\n self.assertIsInstance(item, ItemObject)\n\n # Not empty\n self.assertIsNotNone(item.platform_id)\n self.assertIsNotNone(item.symbol)\n if is_with_timestamp:\n self.assertIsNotNone(item.timestamp)\n if is_with_item_id:\n self.assertIsNotNone(item.item_id) # trade_id: binance, bitfinex - int converted to str; bitmex - str\n\n # Type\n self.assertIsInstance(item.platform_id, int)\n self.assertIsInstance(item.symbol, str)\n if is_with_timestamp:\n self.assertTrue(isinstance(item.timestamp, (float, int)))\n if is_with_item_id:\n self.assertIsInstance(item.item_id, str)\n\n # Value\n self.assertEqual(item.platform_id, platform_id)\n if is_with_timestamp:\n self.assertGreater(item.timestamp, 1000000000)\n if item.is_milliseconds:\n self.assertGreater(item.timestamp, 10000000000)\n if testing_symbol_or_symbols:\n self.assertEqual(item.symbol, item.symbol.upper())\n if isinstance(testing_symbol_or_symbols, str):\n self.assertEqual(item.symbol, testing_symbol_or_symbols)\n else:\n self.assertIn(item.symbol, testing_symbol_or_symbols)\n if is_with_item_id:\n self.assertGreater(len(str(item.item_id)), 0)\n\n def assertTradeIsValid(self, trade, testing_symbol_or_symbols=None, platform_id=None, is_dict=False):\n if is_dict and trade:\n trade = Trade(**trade)\n\n AssertUtil.assertItemIsValid(self, trade, testing_symbol_or_symbols, platform_id, True)\n\n self.assertIsInstance(trade, Trade)\n\n # Not empty\n self.assertIsNotNone(trade.price)\n self.assertIsNotNone(trade.amount)\n 
# self.assertIsNotNone(trade.direction)\n\n # Type\n self.assertIsInstance(trade.price, str)\n self.assertIsInstance(trade.amount, str)\n if trade.direction is not None:\n self.assertIsInstance(trade.direction, int)\n\n # Value\n self.assertGreater(float(trade.price), 0)\n self.assertGreater(float(trade.amount), 0)\n if trade.direction is not None:\n self.assertIn(float(trade.direction), Direction.name_by_value)\n\n def assertMyTradeIsValid(self, my_trade, testing_symbol_or_symbols=None, platform_id=None, is_dict=False):\n if is_dict and my_trade:\n my_trade = MyTrade(**my_trade)\n\n AssertUtil.assertTradeIsValid(self, my_trade, testing_symbol_or_symbols, platform_id, True)\n\n self.assertIsInstance(my_trade, MyTrade)\n\n # Not empty\n self.assertIsNotNone(my_trade.order_id)\n # self.assertIsNotNone(my_trade.fee)\n # self.assertIsNotNone(my_trade.rebate)\n\n # Type\n self.assertIsInstance(my_trade.order_id, str)\n if my_trade.fee is not None:\n self.assertIsInstance(my_trade.fee, str)\n if my_trade.rebate is not None:\n self.assertIsInstance(my_trade.rebate, str)\n\n # Value\n if my_trade.fee is not None:\n self.assertGreater(float(my_trade.fee), 0)\n if my_trade.rebate is not None:\n self.assertGreater(float(my_trade.rebate), 0)\n\n def assertCandleIsValid(self, candle, testing_symbol_or_symbols=None, platform_id=None, is_dict=False):\n if is_dict and candle:\n candle = Candle(**candle)\n\n AssertUtil.assertItemIsValid(self, candle, testing_symbol_or_symbols, platform_id, False)\n\n self.assertIsInstance(candle, Candle)\n\n # Not empty\n self.assertIsNotNone(candle.interval)\n self.assertIsNotNone(candle.price_open)\n self.assertIsNotNone(candle.price_close)\n self.assertIsNotNone(candle.price_high)\n self.assertIsNotNone(candle.price_low)\n # Optional\n # self.assertIsNotNone(candle.amount)\n # self.assertIsNotNone(candle.trades_count)\n\n # Type\n self.assertIsInstance(candle.interval, str)\n self.assertIsInstance(candle.price_open, str)\n self.assertIsInstance(candle.price_close, str)\n self.assertIsInstance(candle.price_high, str)\n self.assertIsInstance(candle.price_low, str)\n if candle.amount is not None:\n self.assertIsInstance(candle.amount, str)\n if candle.trades_count is not None:\n self.assertIsInstance(candle.trades_count, int)\n\n # Value\n self.assertIn(candle.interval, Interval.ALL)\n self.assertGreater(float(candle.price_open), 0)\n self.assertGreater(float(candle.price_close), 0)\n self.assertGreater(float(candle.price_high), 0)\n self.assertGreater(float(candle.price_low), 0)\n if candle.amount is not None:\n self.assertGreater(float(candle.amount), 0)\n if candle.trades_count is not None:\n self.assertGreater(candle.trades_count, 0)\n\n def assertTickerIsValid(self, ticker, testing_symbol_or_symbols=None, platform_id=None, is_dict=False):\n if is_dict and ticker:\n ticker = Ticker(**ticker)\n\n AssertUtil.assertItemIsValid(self, ticker, testing_symbol_or_symbols, platform_id, False, False)\n\n self.assertIsInstance(ticker, Ticker)\n\n # Not empty\n self.assertIsNotNone(ticker.price)\n\n # Type\n self.assertIsInstance(ticker.price, str)\n\n # Value\n self.assertGreater(float(ticker.price), 0)\n\n def assertOrderBookIsValid(self, order_book, testing_symbol_or_symbols=None, platform_id=None, is_dict=False,\n is_diff=False):\n if is_dict and order_book:\n order_book = OrderBook(**order_book)\n\n # Assert order book\n AssertUtil.assertItemIsValid(self, order_book, testing_symbol_or_symbols, platform_id, False, False)\n\n self.assertIsInstance(order_book, OrderBook)\n 
self.assertIsNotNone(order_book.asks)\n self.assertIsNotNone(order_book.bids)\n # if is_diff:\n self.assertGreaterEqual(len(order_book.asks), 0)\n self.assertGreaterEqual(len(order_book.bids), 0)\n # For order book diff\n self.assertGreater(len(order_book.asks + order_book.bids), 0)\n # else:\n # self.assertGreater(len(order_book.asks), 0)\n # self.assertGreater(len(order_book.bids), 0)\n\n # Assert order book items\n for item in order_book.asks:\n AssertUtil.assertOrderBookItemIsValid(self, item)\n for item in order_book.bids:\n AssertUtil.assertOrderBookItemIsValid(self, item)\n\n def assertOrderBookDiffIsValid(self, order_book, testing_symbol_or_symbols=None, platform_id=None, is_dict=False):\n AssertUtil.assertOrderBookIsValid(self, order_book, testing_symbol_or_symbols, platform_id, is_dict, is_diff=True)\n\n def assertOrderBookItemIsValid(self, order_book_item, testing_symbol_or_symbols=None, platform_id=None, is_dict=False):\n if is_dict and order_book_item:\n order_book_item = OrderBookItem(**order_book_item)\n\n # AssertUtil.assertItemIsValid(self, order_book_item, testing_symbol_or_symbols, platform_id, False)\n\n self.assertIsInstance(order_book_item, OrderBookItem)\n\n # Not empty\n self.assertIsNotNone(order_book_item.price)\n self.assertIsNotNone(order_book_item.amount)\n # self.assertIsNotNone(order_book_item.direction)\n # self.assertIsNotNone(order_book_item.order_count)\n\n # Type\n self.assertIsInstance(order_book_item.price, str)\n self.assertIsInstance(order_book_item.amount, str)\n if order_book_item.direction is not None:\n self.assertIsInstance(order_book_item.direction, int)\n if order_book_item.order_count is not None:\n self.assertIsInstance(order_book_item.order_count, int)\n\n # Value\n self.assertGreater(float(order_book_item.price), 0)\n self.assertGreaterEqual(float(order_book_item.amount), 0)\n if order_book_item.direction is not None:\n self.assertIn(order_book_item.direction, OrderBookDirection.name_by_value)\n if order_book_item.order_count is not None:\n self.assertGreater(order_book_item.order_count, 0)\n\n def assertAccountIsValid(self, account, platform_id=None, is_dict=False):\n if is_dict and account:\n account = Account(**account)\n\n self.assertIsInstance(account, Account)\n\n # Not empty\n self.assertIsNotNone(account.platform_id)\n self.assertIsNotNone(account.timestamp)\n self.assertIsNotNone(account.balances)\n\n # Type\n self.assertIsInstance(account.platform_id, int)\n self.assertIsInstance(account.timestamp, (int, float))\n self.assertIsInstance(account.balances, list)\n\n # Value\n self.assertEqual(account.platform_id, platform_id)\n self.assertGreater(account.timestamp, 1000000000)\n if account.is_milliseconds:\n self.assertGreater(account.timestamp, 10000000000)\n self.assertGreaterEqual(len(account.balances), 0)\n for balance in account.balances:\n AssertUtil.assertBalanceIsValid(self, balance, platform_id)\n # for debug\n balances_with_money = [balance for balance in account.balances if float(balance.amount_available) or float(balance.amount_reserved)]\n pass\n\n def assertBalanceIsValid(self, balance, platform_id=None, is_dict=False):\n if is_dict and balance:\n balance = Balance(**balance)\n\n self.assertIsInstance(balance, Balance)\n\n # Not empty\n self.assertIsNotNone(balance.platform_id)\n self.assertIsNotNone(balance.symbol)\n self.assertIsNotNone(balance.amount_available)\n self.assertIsNotNone(balance.amount_reserved)\n\n # Type\n self.assertIsInstance(balance.platform_id, int)\n self.assertIsInstance(balance.symbol, str)\n 
self.assertIsInstance(balance.amount_available, str)\n self.assertIsInstance(balance.amount_reserved, str)\n\n # Value\n self.assertEqual(balance.platform_id, platform_id)\n self.assertEqual(balance.symbol, balance.symbol.upper())\n self.assertGreaterEqual(float(balance.amount_available), 0)\n self.assertGreaterEqual(float(balance.amount_reserved), 0)\n\n def assertOrderIsValid(self, order, testing_symbol_or_symbols=None, platform_id=None, is_dict=False):\n if is_dict and order:\n order = Order(**order)\n\n AssertUtil.assertItemIsValid(self, order, testing_symbol_or_symbols, platform_id, True)\n\n self.assertIsInstance(order, Order)\n\n # Not empty\n self.assertIsNotNone(order.user_order_id)\n self.assertIsNotNone(order.order_type)\n self.assertIsNotNone(order.price)\n self.assertIsNotNone(order.amount_original)\n self.assertIsNotNone(order.amount_executed)\n self.assertIsNotNone(order.direction)\n self.assertIsNotNone(order.order_status)\n\n # Type\n self.assertIsInstance(order.user_order_id, str)\n self.assertIsInstance(order.order_type, int)\n self.assertIsInstance(order.price, str)\n self.assertIsInstance(order.amount_original, str)\n self.assertIsInstance(order.amount_executed, str)\n self.assertIsInstance(order.direction, int)\n self.assertIsInstance(order.order_status, int)\n\n # Value\n self.assertIn(float(order.order_type), OrderType.name_by_value)\n self.assertGreater(float(order.price), 0)\n self.assertGreater(float(order.amount_original), 0)\n self.assertGreater(float(order.amount_executed), 0)\n self.assertIn(order.direction, Direction.name_by_value)\n self.assertIn(order.order_status, OrderStatus.name_by_value)\n" }, { "alpha_fraction": 0.6095399856567383, "alphanum_fraction": 0.6683635115623474, "avg_line_length": 46.783782958984375, "blob_id": "87eeec39d1cdde13de86144ee8c9b598bb82fdba", "content_id": "8a738e5c1f86d1f1efd71bfaf4047a5f0e42caf2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5304, "license_type": "permissive", "max_line_length": 116, "num_lines": 111, "path": "/hyperquant/tests/test_api.py", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "from unittest import TestCase\n\nfrom hyperquant.api import item_format_by_endpoint, Endpoint, Direction, convert_items_obj_to_list, \\\n convert_items_dict_to_list, convert_items_list_to_dict, convert_items_obj_to_dict, ParamName\nfrom hyperquant.clients import Trade, ItemObject\n\n\nclass TestConverting(TestCase):\n endpoint = None\n item_format = None\n\n obj_items = None\n list_items = None\n dict_items = None\n \n obj_item_short = None\n list_item_short = None\n dict_item_short = None\n\n def setUp(self):\n super().setUp()\n\n if not self.endpoint:\n self.skipTest(\"Base test\")\n\n self.item_format = item_format_by_endpoint[self.endpoint]\n\n def test_convert_items_obj_to_list(self):\n # Items to items\n self._test_convert_items(self.obj_items, self.list_items, convert_items_obj_to_list)\n # Item to item\n self._test_convert_items(self.obj_items[0], self.list_items[0], convert_items_obj_to_list)\n # Check for items which are shorter than item_format (i.e. 
item is ItemObject, and item_format is for Trade)\n self._test_convert_items(self.obj_item_short, self.list_item_short, convert_items_obj_to_list)\n\n # Empty to empty, None to None\n self._test_convert_items([], [], convert_items_obj_to_list)\n self._test_convert_items([None, None], [None, None], convert_items_obj_to_list)\n self._test_convert_items(None, None, convert_items_obj_to_list)\n\n def test_convert_items_dict_to_list(self):\n self._test_convert_items(self.dict_items, self.list_items, convert_items_dict_to_list)\n self._test_convert_items(self.dict_items[0], self.list_items[0], convert_items_dict_to_list)\n self._test_convert_items(self.dict_item_short, self.list_item_short, convert_items_dict_to_list)\n\n self._test_convert_items([], [], convert_items_dict_to_list)\n self._test_convert_items([None, None], [None, None], convert_items_dict_to_list)\n self._test_convert_items(None, None, convert_items_dict_to_list)\n\n def test_convert_items_list_to_dict(self):\n self._test_convert_items(self.list_items, self.dict_items, convert_items_list_to_dict)\n self._test_convert_items(self.list_items[0], self.dict_items[0], convert_items_list_to_dict)\n self._test_convert_items(self.list_item_short, self.dict_item_short, convert_items_list_to_dict)\n\n self._test_convert_items([], [], convert_items_list_to_dict)\n self._test_convert_items([None, None], [None, None], convert_items_list_to_dict)\n self._test_convert_items(None, None, convert_items_list_to_dict)\n\n def test_convert_items_obj_to_dict(self):\n self._test_convert_items(self.obj_items, self.dict_items, convert_items_obj_to_dict)\n self._test_convert_items(self.obj_items[0], self.dict_items[0], convert_items_obj_to_dict)\n self._test_convert_items(self.obj_item_short, self.dict_item_short, convert_items_obj_to_dict)\n\n self._test_convert_items([], [], convert_items_obj_to_dict)\n self._test_convert_items([None, None], [None, None], convert_items_obj_to_dict)\n self._test_convert_items(None, None, convert_items_obj_to_dict)\n\n def _test_convert_items(self, items, expected, fun):\n result = fun(items, self.item_format)\n\n self.assertEqual(expected, result)\n\n\nclass TestConvertingTrade(TestConverting):\n endpoint = Endpoint.TRADE\n\n obj_item1 = Trade()\n obj_item1.platform_id = None # None needed to test convert_items_list_to_dict() with 1 item in params\n obj_item1.symbol = \"ETHUSD\"\n obj_item1.timestamp = 143423531\n obj_item1.item_id = \"14121214\"\n obj_item1.price = \"23424546543.3\"\n obj_item1.amount = \"1110.0034\"\n obj_item1.direction = Direction.SELL\n obj_item2 = Trade()\n obj_item2.platform_id = 2\n obj_item2.symbol = \"BNBUSD\"\n obj_item2.timestamp = 143423537\n obj_item2.item_id = 15121215\n obj_item2.price = 23.235656723\n obj_item2.amount = \"0.0034345452\"\n obj_item2.direction = Direction.BUY\n\n obj_items = [obj_item1, obj_item2]\n list_items = [[None, \"ETHUSD\", 143423531, \"14121214\", \"23424546543.3\", \"1110.0034\", Direction.SELL],\n [2, \"BNBUSD\", 143423537, 15121215, 23.235656723, \"0.0034345452\", Direction.BUY]]\n dict_items = [{ParamName.PLATFORM_ID: None, ParamName.SYMBOL: \"ETHUSD\",\n ParamName.TIMESTAMP: 143423531, ParamName.ITEM_ID: \"14121214\",\n ParamName.PRICE: \"23424546543.3\", ParamName.AMOUNT: \"1110.0034\", ParamName.DIRECTION: 1},\n {ParamName.PLATFORM_ID: 2, ParamName.SYMBOL: \"BNBUSD\",\n ParamName.TIMESTAMP: 143423537, ParamName.ITEM_ID: 15121215,\n ParamName.PRICE: 23.235656723, ParamName.AMOUNT: \"0.0034345452\", ParamName.DIRECTION: 2}]\n\n obj_item_short = ItemObject()\n 
obj_item_short.platform_id = None # None needed to test convert_items_list_to_dict() with 1 item in params\n obj_item_short.symbol = \"ETHUSD\"\n obj_item_short.timestamp = 143423531\n obj_item_short.item_id = \"14121214\"\n list_item_short = [None, \"ETHUSD\", 143423531, \"14121214\"]\n dict_item_short = {ParamName.PLATFORM_ID: None, ParamName.SYMBOL: \"ETHUSD\",\n ParamName.TIMESTAMP: 143423531, ParamName.ITEM_ID: \"14121214\"}\n" }, { "alpha_fraction": 0.46846845746040344, "alphanum_fraction": 0.7027027010917664, "avg_line_length": 16.076923370361328, "blob_id": "729ed08dce95d1cfe680b64bb4e4d3d2753e997c", "content_id": "9b667f5fe38f0a0afc9da8b4a164d3bb7a77a191", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 222, "license_type": "permissive", "max_line_length": 25, "num_lines": 13, "path": "/requirements.txt", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "certifi==2018.11.29\nchardet==3.0.4\nclickhouse-driver==0.0.18\nDjango==2.1.7\ngevent==1.4.0\ngreenlet==0.4.15\nidna==2.8\npython-dateutil==2.8.0\npytz==2018.9\nrequests==2.21.0\nsix==1.12.0\nurllib3==1.24.1\nwebsocket-client==0.55.0\n" }, { "alpha_fraction": 0.7406976819038391, "alphanum_fraction": 0.7476744055747986, "avg_line_length": 31.452829360961914, "blob_id": "d765f1d0a48fa2b27650d977da19ed9817901018", "content_id": "8fc5646c060cff5544f50ce1ae70f438fa91357a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1720, "license_type": "permissive", "max_line_length": 119, "num_lines": 53, "path": "/hyperquant/clients/tests/test_binance.py", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "from hyperquant.api import Platform\nfrom hyperquant.clients import Error, ErrorCode\nfrom hyperquant.clients.binance import BinanceRESTClient, BinanceRESTConverterV1, BinanceWSClient, BinanceWSConverterV1\nfrom hyperquant.clients.tests.test_init import TestRESTClient, TestWSClient, TestConverter, TestRESTClientHistory\n\n\n# REST\n\nclass TestBinanceRESTConverterV1(TestConverter):\n converter_class = BinanceRESTConverterV1\n\n\nclass TestBinanceRESTClientV1(TestRESTClient):\n platform_id = Platform.BINANCE\n # version = \"1\"\n\n\nclass TestBinanceRESTClientHistoryV1(TestRESTClientHistory):\n platform_id = Platform.BINANCE\n # version = \"1\"\n\n is_to_item_by_id = True\n\n def test_just_logging_for_paging(self, method_name=\"fetch_trades_history\", is_auth=False, sorting=None):\n super().test_just_logging_for_paging(method_name, True, sorting)\n\n def test_fetch_trades_history_errors(self):\n super().test_fetch_trades_history_errors()\n\n # Testing create_rest_client() which must set api_key for Binance\n result = self.client.fetch_trades_history(self.testing_symbol)\n\n self.assertIsNotNone(result)\n self.assertGoodResult(result)\n\n # Note: for Binance to get trades history you must send api_key\n self.client.set_credentials(None, None)\n result = self.client.fetch_trades_history(self.testing_symbol)\n\n self.assertIsNotNone(result)\n self.assertIsInstance(result, Error)\n self.assertEqual(result.code, ErrorCode.UNAUTHORIZED)\n\n\n# WebSocket\n\nclass TestBinanceWSConverterV1(TestConverter):\n converter_class = BinanceWSConverterV1\n\n\nclass TestBinanceWSClientV1(TestWSClient):\n platform_id = Platform.BINANCE\n # version = \"1\"\n" }, { "alpha_fraction": 0.5593984723091125, "alphanum_fraction": 0.5673221349716187, "avg_line_length": 38.38496398925781, 
"blob_id": "a2b2bccff630f8554482bc230d1da55837c591a3", "content_id": "88f59b4d71f6b989858bc97b662ffee96446ff45", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17290, "license_type": "permissive", "max_line_length": 123, "num_lines": 439, "path": "/hyperquant/clients/bitmex.py", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "import hashlib\nimport hmac\nimport json\nimport time\nimport urllib\n\nfrom hyperquant.api import Platform, Sorting, Direction\nfrom hyperquant.clients import WSClient, Trade, Error, ErrorCode, Endpoint, \\\n ParamName, WSConverter, RESTConverter, PlatformRESTClient, PrivatePlatformRESTClient, ItemObject\n\n\n# REST\n\nclass BitMEXRESTConverterV1(RESTConverter):\n \"\"\"\n Go https://www.bitmex.com/api/v1/schema for whole API schema with param types keys\n which help to distinguish items from each other (for updates and removing).\n \"\"\"\n\n # Main params:\n base_url = \"https://www.bitmex.com/api/v{version}\"\n\n IS_SORTING_ENABLED = True\n\n # Settings:\n\n # Converting info:\n # For converting to platform\n endpoint_lookup = {\n Endpoint.TRADE: \"trade\",\n Endpoint.TRADE_HISTORY: \"trade\",\n }\n param_name_lookup = {\n ParamName.LIMIT: \"count\",\n ParamName.SORTING: \"reverse\",\n ParamName.FROM_ITEM: \"startTime\",\n ParamName.TO_ITEM: \"endTime\",\n ParamName.FROM_TIME: \"startTime\",\n ParamName.TO_TIME: \"endTime\",\n }\n param_value_lookup = {\n Sorting.ASCENDING: \"false\",\n Sorting.DESCENDING: \"true\",\n Sorting.DEFAULT_SORTING: Sorting.ASCENDING,\n }\n max_limit_by_endpoint = {\n Endpoint.TRADE: 500,\n Endpoint.TRADE_HISTORY: 500,\n }\n\n # For parsing\n param_lookup_by_class = {\n Error: {\n \"name\": \"code\",\n \"message\": \"message\",\n },\n Trade: {\n \"trdMatchID\": ParamName.ITEM_ID,\n \"timestamp\": ParamName.TIMESTAMP,\n \"symbol\": ParamName.SYMBOL,\n \"price\": ParamName.PRICE,\n \"size\": ParamName.AMOUNT,\n \"side\": ParamName.DIRECTION,\n },\n }\n\n error_code_by_platform_error_code = {\n # \"\": ErrorCode.UNAUTHORIZED,\n \"Unknown symbol\": ErrorCode.WRONG_SYMBOL,\n # \"ERR_RATE_LIMIT\": ErrorCode.RATE_LIMIT,\n }\n error_code_by_http_status = {\n 400: ErrorCode.WRONG_PARAM,\n 401: ErrorCode.UNAUTHORIZED,\n 429: ErrorCode.RATE_LIMIT, #?\n }\n\n # For converting time\n is_source_in_timestring = True\n timestamp_platform_names = [\"startTime\", \"endTime\"]\n\n def _process_param_value(self, name, value):\n if name == ParamName.FROM_ITEM or name == ParamName.TO_ITEM:\n if isinstance(value, ItemObject):\n timestamp = value.timestamp\n if name == ParamName.TO_ITEM:\n # Make to_item an including param (for BitMEX it's excluding)\n timestamp += (1000 if value.is_milliseconds else 1)\n return timestamp\n return super()._process_param_value(name, value)\n\n def _parse_item(self, endpoint, item_data):\n result = super()._parse_item(endpoint, item_data)\n\n # (For Trade)\n if hasattr(result, ParamName.SYMBOL) and result.symbol[0] == \".\":\n # # \".ETHUSD\" -> \"ETHUSD\"\n # result.symbol = result.symbol[1:]\n # https://www.bitmex.com/api/explorer/#!/Trade/Trade_get Please note\n # that indices (symbols starting with .) post trades at intervals to\n # the trade feed. 
These have a size of 0 and are used only to indicate\n # a changing price.\n return None\n\n # Convert direction\n if result and isinstance(result, Trade):\n result.direction = Direction.BUY if result.direction == \"Buy\" else (\n Direction.SELL if result.direction == \"Sell\" else None)\n result.price = str(result.price)\n result.amount = str(result.amount)\n return result\n\n def parse_error(self, error_data=None, response=None):\n if error_data and \"error\" in error_data:\n error_data = error_data[\"error\"]\n if \"Maximum result count is 500\" in error_data[\"message\"]:\n error_data[\"name\"] = ErrorCode.WRONG_LIMIT\n result = super().parse_error(error_data, response)\n return result\n\n\nclass BitMEXRESTClient(PrivatePlatformRESTClient):\n platform_id = Platform.BITMEX\n version = \"1\" # Default version\n\n IS_NONE_SYMBOL_FOR_ALL_SYMBOLS = True\n\n _converter_class_by_version = {\n \"1\": BitMEXRESTConverterV1,\n }\n\n def _on_response(self, response, result):\n # super()._on_response(response)\n\n if not response.ok and \"Retry-After\" in response.headers:\n self.delay_before_next_request_sec = int(response.headers[\"Retry-After\"])\n else:\n # \"x-ratelimit-limit\": 300\n # \"x-ratelimit-remaining\": 297\n # \"x-ratelimit-reset\": 1489791662\n try:\n ratelimit = int(response.headers[\"x-ratelimit-limit\"])\n remaining_requests = float(response.headers[\"x-ratelimit-remaining\"])\n reset_ratelimit_timestamp = int(response.headers[\"x-ratelimit-reset\"])\n if remaining_requests < ratelimit * 0.1:\n precision_sec = 1 # Current machine time may not be precise, which can cause a ratelimit error\n self.delay_before_next_request_sec = reset_ratelimit_timestamp - time.time() + precision_sec\n else:\n self.delay_before_next_request_sec = 0\n self.logger.debug(\"Ratelimit info. 
remaining_requests: %s/%s delay: %s\",\n remaining_requests, ratelimit, self.delay_before_next_request_sec)\n except Exception as error:\n self.logger.exception(\"Error while defining delay_before_next_request_sec.\", error)\n\n def get_symbols(self, version=None):\n # BitMEX has no get_symbols method in API,\n # and None means \"all symbols\" if defined as symbol param.\n return None\n\n # If symbol not specified all symbols will be returned\n # todo fetch_latest_trades()\n def fetch_trades(self, symbol=None, limit=None, **kwargs):\n # symbol = None\n return super().fetch_trades(symbol, limit, **kwargs)\n\n # If symbol not specified all symbols will be returned\n def fetch_trades_history(self, symbol=None, limit=None, from_item=None,\n sorting=None, from_time=None, to_time=None, **kwargs):\n # Note: from_item used automatically for paging; from_time and to_time - used for custom purposes\n return super().fetch_trades_history(symbol, limit, from_item, sorting=sorting,\n from_time=from_time, to_time=to_time, **kwargs)\n\n # tickers are in instruments\n\n\n# WebSockets\n\nclass BitMEXWSConverterV1(WSConverter):\n # Main params:\n base_url = \"wss://www.bitmex.com/realtime\"\n\n IS_SUBSCRIPTION_COMMAND_SUPPORTED = True\n\n # # symbol_endpoints = [\"execution\", \"instrument\", \"order\", \"orderBookL2\", \"position\", \"quote\", \"trade\"]\n # # supported_endpoints = symbolSubs + [\"margin\"]\n # supported_endpoints = [Endpoint.TRADE]\n # symbol_endpoints = [Endpoint.TRADE]\n\n # Settings:\n\n # Converting info:\n # For converting to platform\n endpoint_lookup = {\n Endpoint.TRADE: \"trade:{symbol}\",\n # Endpoint.TRADE: lambda params: \"trade:\" + params[Param.SYMBOL] if Param.SYMBOL in params else \"trade\",\n }\n\n # For parsing\n param_lookup_by_class = {\n Error: {\n \"status\": \"code\",\n \"error\": \"message\",\n },\n Trade: {\n \"trdMatchID\": ParamName.ITEM_ID,\n \"timestamp\": ParamName.TIMESTAMP,\n \"symbol\": ParamName.SYMBOL,\n \"price\": ParamName.PRICE,\n \"size\": ParamName.AMOUNT,\n \"side\": ParamName.DIRECTION,\n },\n }\n event_type_param = \"table\"\n\n # error_code_by_platform_error_code = {\n # # # \"\": ErrorCode.UNAUTHORIZED,\n # # \"Unknown symbol\": ErrorCode.WRONG_SYMBOL,\n # # # \"ERR_RATE_LIMIT\": ErrorCode.RATE_LIMIT,\n # }\n\n # For converting time\n is_source_in_timestring = True\n # timestamp_platform_names = []\n\n def parse(self, endpoint, data):\n if data:\n endpoint = data.get(self.event_type_param)\n if \"error\" in data:\n result = self.parse_error(data)\n if \"request\" in data:\n result.message += \"request: \" + json.dumps(data[\"request\"])\n return result\n if \"data\" in data:\n data = data[\"data\"]\n return super().parse(endpoint, data)\n\n def _parse_item(self, endpoint, item_data):\n result = super()._parse_item(endpoint, item_data)\n\n # (For Trade)\n if hasattr(result, ParamName.SYMBOL) and result.symbol[0] == \".\":\n # # \".ETHUSD\" -> \"ETHUSD\"\n # result.symbol = result.symbol[1:]\n # https://www.bitmex.com/api/explorer/#!/Trade/Trade_get Please note\n # that indices (symbols starting with .) post trades at intervals to\n # the trade feed. 
These have a size of 0 and are used only to indicate\n # a changing price.\n return None\n\n # Convert direction\n if result and isinstance(result, Trade):\n result.direction = Direction.BUY if result.direction == \"Buy\" else (\n Direction.SELL if result.direction == \"Sell\" else None)\n result.price = str(result.price)\n result.amount = str(result.amount)\n return result\n\n\nclass BitMEXWSClient(WSClient):\n platform_id = Platform.BITMEX\n version = \"1\" # Default version\n\n _converter_class_by_version = {\n \"1\": BitMEXWSConverterV1,\n }\n\n @property\n def url(self):\n self.is_subscribed_with_url = True\n params = {\"subscribe\": \",\".join(self.current_subscriptions)}\n url, platform_params = self.converter.make_url_and_platform_params(params=params, is_join_get_params=True)\n return url\n\n @property\n def headers(self):\n result = super().headers or []\n # Return auth headers\n if self._api_key:\n self.logger.info(\"Authenticating with API Key.\")\n # To auth to the WS using an API key, we generate\n # a signature of a nonce and the WS API endpoint.\n expire = generate_nonce()\n result += [\n \"api-expires: \" + str(expire),\n ]\n if self._api_key and self._api_secret:\n signature = generate_signature(self._api_secret, \"GET\", \"/realtime\", expire, \"\")\n result += [\n \"api-signature: \" + signature,\n \"api-key: \" + self._api_key,\n ]\n else:\n self.logger.info(\"Not authenticating by headers because api_key is not set.\")\n\n return result\n\n # def _on_message(self, message):\n # \"\"\"Handler for parsing WS messages.\"\"\"\n # self.logger.debug(message)\n # message = json.loads(message)\n\n # def on_item_received(self, item):\n # super().on_item_received(item)\n #\n # # table = message[\"table\"] if \"table\" in message else None\n # # action = message[\"action\"] if \"action\" in message else None\n # # try:\n # # if \"subscribe\" in message:\n # # self.logger.debug(\"Subscribed to %s.\" % message[\"subscribe\"])\n # # elif action:\n # #\n # # if table not in self.data:\n # # self.data[table] = []\n # #\n # # # There are four possible actions from the WS:\n # # # \"partial\" - full table image\n # # # \"insert\" - new row\n # # # \"update\" - update row\n # # # \"delete\" - delete row\n # # if action == \"partial\":\n # # self.logger.debug(\"%s: partial\" % table)\n # # self.data[table] += message[\"data\"]\n # # # Keys are communicated on partials to let you know how to uniquely identify\n # # # an item. We use it for updates.\n # # self.keys[table] = message[\"keys\"]\n # # elif action == \"insert\":\n # # self.logger.debug(\"%s: inserting %s\" % (table, message[\"data\"]))\n # # self.data[table] += message[\"data\"]\n # #\n # # # Limit the max length of the table to avoid excessive memory usage.\n # # # Don't trim orders because we'll lose valuable state if we do.\n # # if table not in [\"order\", \"orderBookL2\"] and len(self.data[table]) > BitMEXWebsocket.MAX_TABLE_LEN:\n # # self.data[table] = self.data[table][int(BitMEXWebsocket.MAX_TABLE_LEN / 2):]\n # #\n # # elif action == \"update\":\n # # self.logger.debug(\"%s: updating %s\" % (table, message[\"data\"]))\n # # # Locate the item in the collection and update it.\n # # for updateData in message[\"data\"]:\n # # item = findItemByKeys(self.keys[table], self.data[table], updateData)\n # # if not item:\n # # return # No item found to update. 
Could happen before push\n # # item.update(updateData)\n # # # Remove cancelled / filled orders\n # # if table == \"order\" and item[\"leavesQty\"] <= 0:\n # # self.data[table].remove(item)\n # # elif action == \"delete\":\n # # self.logger.debug(\"%s: deleting %s\" % (table, message[\"data\"]))\n # # # Locate the item in the collection and remove it.\n # # for deleteData in message[\"data\"]:\n # # item = findItemByKeys(self.keys[table], self.data[table], deleteData)\n # # self.data[table].remove(item)\n # # else:\n # # raise Exception(\"Unknown action: %s\" % action)\n # # except:\n # # self.logger.error(traceback.format_exc())\n\n # def get_instrument(self):\n # \"\"\"Get the raw instrument data for this symbol.\"\"\"\n # # Turn the \"tickSize\" into \"tickLog\" for use in rounding\n # instrument = self.data[\"instrument\"][0]\n # instrument[\"tickLog\"] = int(math.fabs(math.log10(instrument[\"tickSize\"])))\n # return instrument\n #\n # def get_ticker(self):\n # \"\"\"Return a ticker object. Generated from quote and trade.\"\"\"\n # lastQuote = self.data[\"quote\"][-1]\n # lastTrade = self.data[\"trade\"][-1]\n # ticker = {\n # \"last\": lastTrade[\"price\"],\n # \"buy\": lastQuote[\"bidPrice\"],\n # \"sell\": lastQuote[\"askPrice\"],\n # \"mid\": (float(lastQuote[\"bidPrice\"] or 0) + float(lastQuote[\"askPrice\"] or 0)) / 2\n # }\n #\n # # The instrument has a tickSize. Use it to round values.\n # instrument = self.data[\"instrument\"][0]\n # return {k: round(float(v or 0), instrument[\"tickLog\"]) for k, v in ticker.items()}\n #\n # def funds(self):\n # \"\"\"Get your margin details.\"\"\"\n # return self.data[\"margin\"][0]\n #\n # def market_depth(self):\n # \"\"\"Get market depth (orderbook). Returns all levels.\"\"\"\n # return self.data[\"orderBookL2\"]\n #\n # def open_orders(self, clOrdIDPrefix):\n # \"\"\"Get all your open orders.\"\"\"\n # orders = self.data[\"order\"]\n # # Filter to only open orders (leavesQty > 0) and those that we actually placed\n # return [o for o in orders if str(o[\"clOrdID\"]).startswith(clOrdIDPrefix) and o[\"leavesQty\"] > 0]\n #\n # def recent_trades(self):\n # \"\"\"Get recent trades.\"\"\"\n # return self.data[\"trade\"]\n\n def _send_subscribe(self, subscriptions):\n self._send_command(\"subscribe\", subscriptions)\n\n def _send_unsubscribe(self, subscriptions):\n self._send_command(\"unsubscribe\", subscriptions)\n\n def _send_command(self, command, params=None):\n if params is None:\n params = []\n self._send({\"op\": command, \"args\": list(params)})\n\n\n# Utility\n\ndef generate_nonce():\n return int(round(time.time() + 3600))\n\n\ndef generate_signature(secret, method, url, nonce, data):\n \"\"\"\n Generates an API signature compatible with BitMEX.\n A signature is HMAC_SHA256(secret, method + path + nonce + data), hex encoded.\n The method must be uppercased, url is relative, nonce must be an increasing 64-bit integer\n and the data, if present, must be JSON without whitespace between keys.\n\n For example, in pseudocode (and in real code below):\n method=POST\n url=/api/v1/order\n nonce=1416993995705\n data={\"symbol\":\"XBTZ14\",\"quantity\":1,\"price\":395.01}\n signature = HEX(HMAC_SHA256(secret, 'POST/api/v1/order1416993995705{\"symbol\":\"XBTZ14\",\"amount\":1,\"price\":395.01}'))\n \"\"\"\n # Parse the url so we can remove the base and extract just the path.\n parsed_url = urllib.parse.urlparse(url)\n path = parsed_url.path\n if parsed_url.query:\n path = path + '?' 
+ parsed_url.query\n\n # print \"Computing HMAC: %s\" % method + path + str(nonce) + data\n message = (method + path + str(nonce) + data).encode('utf-8')\n\n signature = hmac.new(secret.encode('utf-8'), message, digestmod=hashlib.sha256).hexdigest()\n return signature\n" }, { "alpha_fraction": 0.7231467366218567, "alphanum_fraction": 0.7326021194458008, "avg_line_length": 28.12765884399414, "blob_id": "d9de1d6475acdb587189d37bff4ac7a0e675e8d6", "content_id": "17468d93549fc480e4bcda8ed7a93bebc608f97a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2644, "license_type": "permissive", "max_line_length": 113, "num_lines": 94, "path": "/hyperquant/clients/tests/test_bitfinex.py", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "from hyperquant.api import Platform\nfrom hyperquant.clients.bitfinex import BitfinexRESTClient, BitfinexWSClient, \\\n BitfinexRESTConverterV2, BitfinexRESTConverterV1, BitfinexWSConverterV1, BitfinexWSConverterV2\nfrom hyperquant.clients.tests.test_init import TestRESTClient, TestWSClient, TestConverter, TestRESTClientHistory\n\n\n# REST\n\nclass TestBitfinexRESTConverterV1(TestConverter):\n converter_class = BitfinexRESTConverterV1\n\n\nclass TestBitfinexRESTConverterV2(TestConverter):\n converter_class = BitfinexRESTConverterV2\n\n\nclass TestBitfinexRESTClientV1(TestRESTClient):\n platform_id = Platform.BITFINEX\n version = \"1\"\n\n has_limit_error = False\n is_symbol_case_sensitive = False\n\n\nclass TestBitfinexRESTClientHistoryV1(TestRESTClientHistory):\n platform_id = Platform.BITFINEX\n version = \"1\"\n\n has_limit_error = False\n is_symbol_case_sensitive = False\n\n is_pagination_supported = False\n is_to_item_supported = False\n\n\nclass TestBitfinexRESTClientV2(TestRESTClient):\n client_class = BitfinexRESTClient\n version = \"2\"\n\n testing_symbol = \"ETHUSD\"\n is_sorting_supported = True\n\n has_limit_error = True\n is_symbol_case_sensitive = True\n\n def test_fetch_trades_errors(self, method_name=\"fetch_trades\", is_auth=False):\n client = self.client_authed if is_auth else self.client\n\n # Wrong symbol\n result = getattr(client, method_name)(self.wrong_symbol)\n\n # Empty list instead of error\n # (todo check, maybe we should create error for each empty list returned +++ yes, we should!)\n self.assertEqual(result, [])\n\n\nclass TestBitfinexRESTClientHistoryV2(TestRESTClientHistory):\n client_class = BitfinexRESTClient\n version = \"2\"\n\n testing_symbol = \"ETHUSD\"\n is_sorting_supported = True\n\n has_limit_error = True\n is_symbol_case_sensitive = True\n\n def test_fetch_trades_errors(self, method_name=\"fetch_trades\", is_auth=False):\n client = self.client_authed if is_auth else self.client\n\n # Wrong symbol\n result = getattr(client, method_name)(self.wrong_symbol)\n\n # Empty list instead of error\n # (todo check, maybe we should create error for each empty list returned +++ yes, we should!)\n self.assertEqual(result, [])\n\n\n# WebSocket\n\nclass TestBitfinexWSConverterV1(TestConverter):\n converter_class = BitfinexWSConverterV1\n\n\nclass TestBitfinexWSConverterV2(TestConverter):\n converter_class = BitfinexWSConverterV2\n\n\nclass TestBitfinexWSClientV1(TestWSClient):\n platform_id = Platform.BITFINEX\n version = \"1\"\n\n\nclass TestBitfinexWSClientV2(TestBitfinexWSClientV1):\n version = \"2\"\n" }, { "alpha_fraction": 0.6190177202224731, "alphanum_fraction": 0.6206437945365906, "avg_line_length": 36.50381088256836, "blob_id": 
"21207512fb50052875296dd63ffc32586a5c1ea2", "content_id": "2fd1c14a69afcf97a5f59effd6b17cac495bf9aa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54276, "license_type": "permissive", "max_line_length": 128, "num_lines": 1443, "path": "/hyperquant/clients/__init__.py", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "import json\nimport logging\nimport time\nfrom datetime import datetime\nfrom operator import itemgetter\nfrom threading import Thread\nfrom urllib.parse import urljoin, urlencode\n\nimport requests\nfrom dateutil import parser\nfrom websocket import WebSocketApp\n\nfrom hyperquant.api import ParamName, ParamValue, ErrorCode, Endpoint, Platform, Sorting, OrderType\n\n\"\"\"\nAPI clients for various trading platforms: REST and WebSocket.\n\nSome documentation:\nhttps://docs.google.com/document/d/1U3kuokpeNSzxSbXhXJ3XnNYbfZaK5nY3_tAL-Uk0wKQ\n\"\"\"\n\n\n# Value objects\n\nclass ValueObject:\n pass\n\n\n# WS\nclass Info(ValueObject):\n code = None\n message = None\n\n\n# WS\nclass Channel(ValueObject):\n channel_id = None\n channel = None\n symbol = None\n\n\nclass Error(ValueObject):\n code = None\n message = None\n\n def __str__(self) -> str:\n return \"[Trading-Error code: %s msg: %s]\" % (self.code, self.message)\n\n\nclass DataObject(ValueObject):\n pass\n\n is_milliseconds = False\n\n\nclass ItemObject(DataObject):\n # (Note: Order is from abstract to concrete)\n platform_id = None\n symbol = None\n timestamp = None # Unix timestamp in milliseconds\n item_id = None # There is no item_id for candle, ticker, bookticker, only for trade, mytrade and order\n\n def __init__(self, platform_id=None, symbol=None, timestamp=None, item_id=None, is_milliseconds=False) -> None:\n super().__init__()\n self.platform_id = platform_id\n self.symbol = symbol\n self.timestamp = timestamp\n self.item_id = item_id\n\n self.is_milliseconds = is_milliseconds\n\n def __eq__(self, o: object) -> bool:\n # Identifying params:\n return o and \\\n self.platform_id == o.platform_id and \\\n self.item_id == o.item_id and \\\n self.timestamp == o.timestamp and \\\n self.symbol == o.symbol\n\n def __hash__(self) -> int:\n return hash((self.platform_id, self.item_id, self.timestamp))\n\n def __repr__(self) -> str:\n platform_name = Platform.get_platform_name_by_id(self.platform_id)\n timestamp_s = self.timestamp / 1000 if self.is_milliseconds else self.timestamp\n timestamp_iso = datetime.utcfromtimestamp(timestamp_s).isoformat() if timestamp_s else timestamp_s\n return \"[Item-%s id:%s time:%s symbol:%s]\" % (platform_name, self.item_id, timestamp_iso, self.symbol)\n\n\nclass Trade(ItemObject):\n # Trade data:\n price = None\n amount = None\n\n # Not for all platforms or versions:\n direction = None\n\n def __init__(self, platform_id=None, symbol=None, timestamp=None, item_id=None,\n price=None, amount=None, direction=None, is_milliseconds=False) -> None:\n super().__init__(platform_id, symbol, timestamp, item_id, is_milliseconds)\n self.price = price\n self.amount = amount\n self.direction = direction\n\n\nclass MyTrade(Trade):\n order_id = None\n\n # Optional (not for all platforms):\n fee = None # Комиссия биржи # must be always positive; 0 if not supported\n rebate = None # Возврат денег, скидка после покупки # must be always positive; 0 if not supported\n # fee_symbol = None # Currency symbol, by default, it's the same as for price\n # Note: volume = price * amount, total = volume - fee + rebate\n\n def 
__init__(self, platform_id=None, symbol=None, timestamp=None, item_id=None, price=None, amount=None,\n direction=None, order_id=None, fee=None, rebate=None, is_milliseconds=False) -> None:\n super().__init__(platform_id, symbol, timestamp, item_id, price, amount, direction, is_milliseconds)\n self.order_id = order_id\n self.fee = fee\n self.rebate = rebate\n\n\nclass Candle(ItemObject):\n # platform_id = None\n # symbol = None\n # timestamp = None # open_timestamp\n interval = None\n\n price_open = None\n price_close = None\n price_high = None\n price_low = None\n amount = None\n\n # Optional\n trades_count = None\n\n def __init__(self, platform_id=None, symbol=None, timestamp=None, interval=None,\n price_open=None, price_close=None, price_high=None, price_low=None,\n amount=None, trades_count=None, is_milliseconds=False) -> None:\n super().__init__(platform_id, symbol, timestamp, None, is_milliseconds)\n self.interval = interval\n self.price_open = price_open\n self.price_close = price_close\n self.price_high = price_high\n self.price_low = price_low\n self.amount = amount\n self.trades_count = trades_count\n\n\nclass Ticker(ItemObject):\n # platform_id = None\n # symbol = None\n # timestamp = None\n\n price = None\n\n def __init__(self, platform_id=None, symbol=None, timestamp=None, price=None, is_milliseconds=False) -> None:\n super().__init__(platform_id, symbol, timestamp, None, is_milliseconds)\n self.price = price\n\n\n# class BookTicker(DataObject):\n# symbol = None\n# price_bid = None\n# bid_amount = None\n# price_ask = None\n# ask_amount = None\n\n\nclass OrderBook(ItemObject):\n asks = None\n bids = None\n\n def __init__(self, platform_id=None, symbol=None, timestamp=None, item_id=None, is_milliseconds=False,\n asks=None, bids=None) -> None:\n super().__init__(platform_id, symbol, timestamp, item_id, is_milliseconds)\n self.asks = asks\n self.bids = bids\n\n\nclass OrderBookItem(ItemObject):\n # platform_id = None\n # order_book_item_id = None # item_id = None\n # symbol = None\n\n price = None\n amount = None\n direction = None\n\n # Optional\n order_count = None\n\n def __init__(self, platform_id=None, symbol=None, timestamp=None, item_id=None, is_milliseconds=False,\n price=None, amount=None, direction=None, order_count=None) -> None:\n super().__init__(platform_id, symbol, timestamp, item_id, is_milliseconds)\n\n self.price = price\n self.amount = amount\n self.direction = direction\n self.order_count = order_count\n\n\nclass Account(DataObject):\n platform_id = None\n timestamp = None\n\n balances = None\n\n # Binance other params:\n # \"makerCommission\": 15,\n # \"takerCommission\": 15,\n # \"buyerCommission\": 0,\n # \"sellerCommission\": 0,\n # \"canTrade\": true,\n # \"canWithdraw\": true,\n # \"canDeposit\": true,\n\n def __init__(self, platform_id=None, timestamp=None, balances=None) -> None:\n super().__init__()\n self.platform_id = platform_id\n self.timestamp = timestamp\n self.balances = balances\n\n\nclass Balance(ValueObject):\n # Asset, currency\n platform_id = None\n symbol = None\n amount_available = None\n amount_reserved = None\n\n def __init__(self, platform_id=None, symbol=None, amount_available=None, amount_reserved=None) -> None:\n super().__init__()\n self.platform_id = platform_id\n self.symbol = symbol\n self.amount_available = amount_available\n self.amount_reserved = amount_reserved\n\n\nclass Order(ItemObject):\n # platform_id = None\n # item_id = None\n # symbol = None\n # timestamp = None # (transact timestamp)\n user_order_id = None\n\n 
order_type = None # limit and market\n price = None\n amount_original = None\n amount_executed = None\n direction = None\n\n order_status = None # open and close\n\n def __init__(self, platform_id=None, symbol=None, timestamp=None, item_id=None, is_milliseconds=False,\n user_order_id=None, order_type=None, price=None,\n amount_original=None, amount_executed=None, direction=None, order_status=None) -> None:\n super().__init__(platform_id, symbol, timestamp, item_id, is_milliseconds)\n self.user_order_id = user_order_id\n self.order_type = order_type\n self.price = price\n self.amount_original = amount_original\n self.amount_executed = amount_executed\n self.direction = direction\n self.order_status = order_status\n\n\n# Base\n\nclass ProtocolConverter:\n \"\"\"\n Contains all the info and logic to convert data between\n our library API and remote platform API.\n \"\"\"\n\n # Main params:\n # (Set by client or set it by yourself in subclass)\n platform_id = None\n version = None\n # (Define in subclass)\n base_url = None\n\n # Settings:\n is_use_max_limit = False\n\n # Converting info:\n # Our endpoint to platform_endpoint\n endpoint_lookup = None # {\"endpoint\": \"platform_endpoint\", ...}\n # Our param_name to platform_param_name\n param_name_lookup = None # {ParamName.FROM_TIME: \"start\", \"not_supported\": None, ...}\n # Our param_value to platform_param_value\n param_value_lookup = None # {Sorting.ASCENDING: 0}\n max_limit_by_endpoint = None\n\n # For parsing\n item_class_by_endpoint = {\n Endpoint.TRADE: Trade,\n Endpoint.TRADE_HISTORY: Trade,\n Endpoint.TRADE_MY: MyTrade,\n Endpoint.CANDLE: Candle,\n Endpoint.TICKER: Ticker,\n Endpoint.ORDER_BOOK: OrderBook,\n Endpoint.ORDER_BOOK_DIFF: OrderBook,\n # Private\n Endpoint.ACCOUNT: Account,\n Endpoint.ORDER: Order,\n Endpoint.ORDER_CURRENT: Order,\n Endpoint.ORDER_MY: Order,\n }\n # {Trade: {ParamName.ITEM_ID: \"tid\", ...}} - omitted properties won't be set\n param_lookup_by_class = None\n\n error_code_by_platform_error_code = None\n error_code_by_http_status = None\n\n # For converting time\n use_milliseconds = False # todo always use milliseconds\n is_source_in_milliseconds = False\n is_source_in_timestring = False\n timestamp_platform_names = None # [\"startTime\", \"endTime\"]\n # (If platform api is not consistent)\n timestamp_platform_names_by_endpoint = None # {Endpoint.TRADE: [\"start\", \"end\"]}\n ITEM_TIMESTAMP_ATTR = ParamName.TIMESTAMP\n\n def __init__(self, platform_id=None, version=None):\n if platform_id is not None:\n self.platform_id = platform_id\n if version is not None:\n self.version = version\n\n # Create logger\n platform_name = Platform.get_platform_name_by_id(self.platform_id)\n self.logger = logging.getLogger(\"%s.%s.v%s\" % (\"Converter\", platform_name, self.version))\n\n # Convert to platform format\n\n def make_url_and_platform_params(self, endpoint=None, params=None, is_join_get_params=False, version=None):\n # Apply version on base_url\n version = version or self.version\n url = self.base_url.format(version=version) if self.base_url and version else self.base_url\n # Prepare path and params\n url_resources, platform_params = self.prepare_params(endpoint, params)\n\n # Make resulting URL\n # url=ba://se_url/resou/rces?p=ar&am=s\n if url_resources and url:\n url = urljoin(url + \"/\", \"/\".join(url_resources))\n if platform_params and is_join_get_params:\n url = url + \"?\" + urlencode(platform_params)\n return url, platform_params\n\n def prepare_params(self, endpoint=None, params=None):\n # Override in 
subclasses if it is the only way to adapt the client to a platform\n\n # Convert our code's names to custom platform's names\n platform_params = {self._get_platform_param_name(key): self._process_param_value(key, value)\n for key, value in params.items() if value is not None} if params else {}\n # (Delete params not supported by the platform, which are defined in lookups as empty)\n platform_params.pop(\"\", \"\")\n platform_params.pop(None, None)\n self._convert_timestamp_values_to_platform(endpoint, platform_params)\n\n # Endpoint.TRADE -> \"trades/ETHBTC\" or \"trades\"\n platform_endpoint = self._get_platform_endpoint(endpoint, params)\n\n # Make path part of URL (as a list) using endpoint and params\n resources = [platform_endpoint] if platform_endpoint else []\n\n return resources, platform_params\n\n def _process_param_value(self, name, value):\n # Convert values to platform values\n # if name in ParamValue.param_names:\n value = self._get_platform_param_value(value, name)\n return value\n\n def _get_platform_endpoint(self, endpoint, params):\n # Convert our code's endpoint to custom platform's endpoint\n\n # Endpoint.TRADE -> \"trades/{symbol}\" or \"trades\" or lambda params: \"trades\"\n platform_endpoint = self.endpoint_lookup.get(endpoint, endpoint) if self.endpoint_lookup else endpoint\n if callable(platform_endpoint):\n platform_endpoint = platform_endpoint(params)\n if platform_endpoint:\n # \"trades\", {\"symbol\": \"ETHBTC\"} => \"trades\" (no error)\n # \"trades/{symbol}/hist\", {\"symbol\": \"ETHBTC\"} => \"trades/ETHBTC/hist\"\n # \"trades/{symbol}/hist\", {} => Error!\n platform_endpoint = platform_endpoint.format(**params)\n\n return platform_endpoint\n\n def _get_platform_param_name(self, name):\n # Convert our code's param name to custom platform's param name\n return self.param_name_lookup.get(name, name) if self.param_name_lookup else name\n\n def _get_platform_param_value(self, value, name=None):\n # Convert our code's param value to custom platform's param value\n lookup = self.param_value_lookup\n lookup = lookup.get(name, lookup) if lookup else None\n return lookup.get(value, value) if lookup else value\n\n # Convert from platform format\n\n def parse(self, endpoint, data):\n # if not endpoint or not data:\n # self.logger.warning(\"Some argument is empty in parse(). endpoint: %s, data: %s\", endpoint, data)\n # return data\n if not data:\n self.logger.warning(\"Data argument is empty in parse(). 
endpoint: %s, data: %s\", endpoint, data)\n return data\n\n # (If list of items data, but not an item data as a list)\n if isinstance(data, list): # and not isinstance(data[0], list):\n result = [self._parse_item(endpoint, item_data) for item_data in data]\n # (Skip empty)\n result = [item for item in result if item]\n return result\n else:\n return self._parse_item(endpoint, data)\n\n def _parse_item(self, endpoint, item_data):\n # Check item_class by endpoint\n if not endpoint or not self.item_class_by_endpoint or endpoint not in self.item_class_by_endpoint:\n self.logger.warning(\"Wrong endpoint: %s in parse_item().\", endpoint)\n return item_data\n item_class = self.item_class_by_endpoint[endpoint]\n\n # Create and set up item by item_data (using lookup to convert property names)\n item = self._create_and_set_up_object(item_class, item_data)\n item = self._post_process_item(item)\n return item\n\n def _post_process_item(self, item):\n # Process parsed values (convert from platform)\n # Set platform_id\n if hasattr(item, ParamName.PLATFORM_ID):\n item.platform_id = self.platform_id\n # Stringify item_id\n if hasattr(item, ParamName.ITEM_ID) and item.item_id is not None:\n item.item_id = str(item.item_id)\n # Convert timestamp\n # (If API returns milliseconds or string date we must convert them to Unix timestamp (in seconds or ms))\n # (Note: add here more timestamp attributes if you use another name in your VOs)\n if hasattr(item, self.ITEM_TIMESTAMP_ATTR) and item.timestamp:\n item.timestamp = self._convert_timestamp_from_platform(item.timestamp)\n item.is_milliseconds = self.use_milliseconds\n # Convert asks and bids to OrderBookItem type\n if hasattr(item, ParamName.ASKS) and item.asks:\n item.asks = [self._create_and_set_up_object(OrderBookItem, item_data)\n for item_data in item.asks]\n if hasattr(item, ParamName.BIDS) and item.bids:\n item.bids = [self._create_and_set_up_object(OrderBookItem, item_data)\n for item_data in item.bids]\n # Convert items to Balance type\n if hasattr(item, ParamName.BALANCES) and item.balances:\n item.balances = [self._create_and_set_up_object(Balance, item_data)\n for item_data in item.balances]\n # Set platform_id\n for balance in item.balances:\n self._post_process_item(balance)\n\n return item\n\n def parse_error(self, error_data=None, response=None):\n # (error_data=None and response!=None when REST API returns 404 and html response)\n if response and response.ok:\n return None\n\n result = self._create_and_set_up_object(Error, error_data) or Error()\n response_message = \" (status: %s %s code: %s msg: %s)\" % (\n response.status_code, response.reason, result.code, result.message) if response \\\n else \" (code: %s msg: %s)\" % (result.code, result.message)\n if not result.code:\n result.code = response.status_code\n result.code = self.error_code_by_platform_error_code.get(result.code, result.code) \\\n if self.error_code_by_platform_error_code else result.code\n result.message = ErrorCode.get_message_by_code(result.code) + response_message\n return result\n\n def _create_and_set_up_object(self, object_class, data):\n if not object_class or not data:\n return None\n\n obj = object_class()\n lookup = self.param_lookup_by_class.get(object_class) if self.param_lookup_by_class else None\n if not lookup:\n # self.logger.error(\"There is no lookup for %s in %s\", object_class, self.__class__)\n raise Exception(\"There is no lookup for %s in %s\" % (object_class, self.__class__))\n # (Lookup is usually a dict, but can be a list when item_data is a 
list)\n key_pair = lookup.items() if isinstance(lookup, dict) else enumerate(lookup)\n for platform_key, key in key_pair:\n if key and (not isinstance(data, dict) or platform_key in data):\n setattr(obj, key, data[platform_key])\n return obj\n\n # Convert from and to platform\n\n def _convert_timestamp_values_to_platform(self, endpoint, platform_params):\n if not platform_params:\n return\n timestamp_platform_names = self.timestamp_platform_names_by_endpoint.get(\n endpoint, self.timestamp_platform_names) \\\n if self.timestamp_platform_names_by_endpoint else self.timestamp_platform_names\n if not timestamp_platform_names:\n return\n\n for name in timestamp_platform_names:\n if name in platform_params:\n value = platform_params[name]\n if isinstance(value, ValueObject):\n value = getattr(value, self.ITEM_TIMESTAMP_ATTR, value)\n platform_params[name] = self._convert_timestamp_to_platform(value)\n\n def _convert_timestamp_to_platform(self, timestamp):\n if not timestamp:\n return timestamp\n\n if self.use_milliseconds:\n timestamp /= 1000\n\n if self.is_source_in_milliseconds:\n timestamp *= 1000\n elif self.is_source_in_timestring:\n dt = datetime.utcfromtimestamp(timestamp)\n timestamp = dt.isoformat()\n return timestamp\n\n def _convert_timestamp_from_platform(self, timestamp):\n if not timestamp:\n return timestamp\n if self.is_source_in_milliseconds:\n timestamp /= 1000\n # if int(timestamp) == timestamp:\n # timestamp = int(timestamp)\n elif self.is_source_in_timestring:\n timestamp = parser.parse(timestamp).timestamp()\n\n if self.use_milliseconds:\n timestamp = int(timestamp * 1000)\n return timestamp\n\n\nclass BaseClient:\n \"\"\"\n All time params are unix timestamps in seconds (float or int).\n \"\"\"\n\n # Main params\n _log_prefix = \"Client\"\n platform_id = None\n version = None\n _api_key = None\n _api_secret = None\n default_converter_class = ProtocolConverter\n _converter_class_by_version = None\n _converter_by_version = None\n\n # If True then if \"symbol\" param set to None that will return data for \"all symbols\"\n IS_NONE_SYMBOL_FOR_ALL_SYMBOLS = False\n\n @property\n def headers(self):\n # Usually returns auth and other headers (Don't return None)\n # (as a dict for requests (REST) and a list for WebSockets (WS))\n return []\n\n @property\n def use_milliseconds(self):\n return self.converter.use_milliseconds\n\n @use_milliseconds.setter\n def use_milliseconds(self, value):\n self.converter.use_milliseconds = value\n\n def __init__(self, version=None, **kwargs) -> None:\n super().__init__()\n\n if version is not None:\n self.version = str(version)\n\n # Set up settings\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n # Create logger\n platform_name = Platform.get_platform_name_by_id(self.platform_id)\n self.logger = logging.getLogger(\"%s.%s.v%s\" % (self._log_prefix, platform_name, self.version))\n # self.logger.debug(\"Create %s client for %s platform. 
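\n # Worked example for the conversions above, assuming is_source_in_milliseconds=True and use_milliseconds=False:\n # _convert_timestamp_from_platform(1514764800000) # -> 1514764800.0 (platform ms -> our seconds)\n # _convert_timestamp_to_platform(1514764800) # -> 1514764800000 (our seconds -> platform ms)\n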
url+params: %s\",\n # self._log_prefix, platform_name, self.make_url_and_platform_params())\n\n # Create converter\n self.converter = self.get_or_create_converter()\n if not self.converter:\n raise Exception(\"There is no converter_class in %s for version: %s\" % (self.__class__, self.version))\n\n def set_credentials(self, api_key, api_secret):\n self._api_key = api_key\n self._api_secret = api_secret\n\n def get_or_create_converter(self, version=None):\n # Converter stores all the info about a platform\n # Note: Using version to get converter at any time allows us to easily\n # switch version for just one request or for all further requests\n # (used for bitfinex, for example, to get symbols which enabled only for v1)\n\n if not version:\n version = self.version\n version = str(version)\n\n if not self._converter_by_version:\n self._converter_by_version = {}\n if version in self._converter_by_version:\n return self._converter_by_version[version]\n\n # Get class\n converter_class = self._converter_class_by_version.get(version) \\\n if self._converter_class_by_version else self.default_converter_class\n # Note: platform_id could be set in converter or in client\n if not self.platform_id:\n self.platform_id = converter_class.platform_id\n # Create and store\n converter = converter_class(self.platform_id, version) if converter_class else None\n self._converter_by_version[version] = converter\n\n return converter\n\n def close(self):\n pass\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self.close()\n\n\n# REST\n\nclass RESTConverter(ProtocolConverter):\n # sorting values: ASCENDING, DESCENDING (newest first), None\n # DEFAULT_SORTING = Param.ASCENDING # Const for current platform. See in param_name_lookup\n IS_SORTING_ENABLED = False # False - SORTING param is not supported for current platform\n sorting = Sorting.DESCENDING # Choose default sorting for all requests\n\n secured_endpoints = [Endpoint.ACCOUNT, Endpoint.TRADE_MY,\n Endpoint.ORDER, Endpoint.ORDER_TEST, Endpoint.ORDER_MY, Endpoint.ORDER_CURRENT]\n\n # endpoint -> endpoint (if has different endpoint for history)\n history_endpoint_lookup = {\n Endpoint.TRADE: Endpoint.TRADE_HISTORY,\n }\n\n # endpoint -> platform_endpoint\n endpoint_lookup = None\n max_limit_by_endpoint = None\n\n @property\n def default_sorting(self):\n # Default sorting for current platform if no sorting param is specified\n return self._get_platform_param_value(Sorting.DEFAULT_SORTING)\n\n def preprocess_params(self, endpoint, params):\n self._process_limit_param(endpoint, params)\n self._process_sorting_param(endpoint, params)\n # Must be after sorting added\n self._process_from_item_param(endpoint, params)\n return params\n\n def _process_limit_param(self, endpoint, params):\n # (If LIMIT param is set to None (expected, but not defined))\n is_use_max_limit = self.is_use_max_limit or (params.get(ParamName.IS_USE_MAX_LIMIT, False) if params else False)\n is_limit_supported_here = params and ParamName.LIMIT in params\n if is_use_max_limit and is_limit_supported_here and params[ParamName.LIMIT] is None:\n value = self.max_limit_by_endpoint.get(endpoint, 1000000) if self.max_limit_by_endpoint else None\n if value is not None:\n # Set limit to maximum supported by a platform\n params[ParamName.LIMIT] = value\n\n def _process_sorting_param(self, endpoint, params):\n # (Add only if a platform supports it, and it is not already added)\n\n if not self.IS_SORTING_ENABLED and ParamName.SORTING in params:\n del params[ParamName.SORTING]\n elif 
self.IS_SORTING_ENABLED and not params.get(ParamName.SORTING):\n params[ParamName.SORTING] = self.sorting\n\n def _get_real_sorting(self, params):\n sorting = params.get(ParamName.SORTING) if params else None\n return sorting or self.default_sorting\n\n def _process_from_item_param(self, endpoint, params):\n from_item = params.get(ParamName.FROM_ITEM)\n if not from_item or not params: # or not self.IS_SORTING_ENABLED:\n return\n\n to_item = params.get(ParamName.TO_ITEM)\n is_descending = self._get_real_sorting(params) == Sorting.DESCENDING\n\n # (from_item <-> to_item)\n # is_from_newer_than_to = getattr(from_item, self.ITEM_TIMESTAMP_ATTR, 0) > \\\n # getattr(to_item, self.ITEM_TIMESTAMP_ATTR, 0)\n # (Guard against to_item=None before touching its timestamp)\n is_from_newer_than_to = from_item and to_item and \\\n (from_item.timestamp or 0) > (to_item.timestamp or 0)\n if is_from_newer_than_to:\n params[ParamName.FROM_ITEM] = to_item\n params[ParamName.TO_ITEM] = from_item\n\n # (from_item -> to_item)\n if is_descending and not to_item:\n params[ParamName.TO_ITEM] = from_item\n del params[ParamName.FROM_ITEM]\n\n def process_secured(self, endpoint, platform_params, api_key, api_secret):\n if endpoint in self.secured_endpoints:\n platform_params = self._generate_and_add_signature(platform_params, api_key, api_secret)\n return platform_params\n\n def _generate_and_add_signature(self, platform_params, api_key, api_secret):\n # Generate and add signature here\n return platform_params\n\n def post_process_result(self, method, endpoint, params, result):\n # Process result using request data\n\n if isinstance(result, Error):\n return result\n\n # (Symbol and interval are often not returned in response, so we have to set it here)\n # symbol = params.get(ParamName.SYMBOL) if params else None\n # if symbol:\n # if isinstance(result, list):\n # for item in result:\n # if hasattr(item, ParamName.SYMBOL):\n # item.symbol = symbol\n # else:\n # if hasattr(result, ParamName.SYMBOL):\n # result.symbol = symbol\n self._propagate_param_to_result(ParamName.SYMBOL, params, result)\n self._propagate_param_to_result(ParamName.INTERVAL, params, result)\n\n return result\n\n def _propagate_param_to_result(self, param_name, params, result):\n value = params.get(param_name) if params else None\n if value:\n if isinstance(result, list):\n for item in result:\n if hasattr(item, param_name):\n setattr(item, param_name, value)\n else:\n if hasattr(result, param_name):\n setattr(result, param_name, value)\n\n\nclass BaseRESTClient(BaseClient):\n # Settings:\n _log_prefix = \"RESTClient\"\n\n default_converter_class = RESTConverter\n\n # State:\n delay_before_next_request_sec = 0\n\n session = None\n _last_response_for_debugging = None\n\n @property\n def headers(self):\n return {\n \"Accept\": \"application/json\",\n \"User-Agent\": \"client/python\",\n }\n\n def __init__(self, version=None, **kwargs) -> None:\n super().__init__(version, **kwargs)\n\n self.session = requests.session()\n\n def close(self):\n if self.session:\n self.session.close()\n\n def _send(self, method, endpoint, params=None, version=None, **kwargs):\n converter = self.get_or_create_converter(version)\n\n # Prepare\n params = dict(**kwargs, **(params or {}))\n params = converter.preprocess_params(endpoint, params)\n url, platform_params = converter.make_url_and_platform_params(endpoint, params, version=version)\n platform_params = converter.process_secured(endpoint, platform_params, self._api_key, self._api_secret)\n if not url:\n return None\n\n # Send\n kwargs = {\"headers\": self.headers}\n params_name = 
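\n # Worked example for _process_from_item_param above (trade objects are hypothetical):\n # with Sorting.DESCENDING and params = {FROM_ITEM: newer, TO_ITEM: older} the items are swapped,\n # so FROM_ITEM is always the older one; with DESCENDING and no TO_ITEM, FROM_ITEM is moved into TO_ITEM.\n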
\"params\" if method.lower() == \"get\" else \"data\"\n kwargs[params_name] = platform_params\n self.logger.info(\"Send: %s %s %s\", method, url, platform_params)\n response = self.session.request(method, url, **kwargs)\n\n # Parse\n self._last_response_for_debugging = response\n if response.ok:\n result = converter.parse(endpoint, response.json())\n result = converter.post_process_result(method, endpoint, params, result)\n else:\n is_json = \"json\" in response.headers.get(\"content-type\", \"\")\n result = converter.parse_error(response.json() if is_json else None, response)\n self.logger.info(\"Response: %s Parsed result: %s %s\", response,\n len(result) if isinstance(result, list) else \"\",\n str(result)[:100] + \" ... \" + str(result)[-100:])\n self._on_response(response, result)\n\n # Return parsed value objects or Error instance\n return result\n\n def _on_response(self, response, result):\n pass\n\n\nclass PlatformRESTClient(BaseRESTClient):\n \"\"\"\n Important! Behavior when some param is None, or for any other edge case, should be the same for all platforms.\n Important! from and to params are inclusive: [from, to], not [from, to) or (from, to).\n\n (The commented-out methods will most likely not be needed, but they are kept just in case,\n so we don't have to come back to this and think it all through again.)\n \"\"\"\n _server_time_diff_s = None\n\n def ping(self, version=None, **kwargs):\n endpoint = Endpoint.PING\n return self._send(\"GET\", endpoint, version=version, **kwargs)\n\n def get_server_timestamp(self, force_from_server=False, version=None, **kwargs):\n endpoint = Endpoint.SERVER_TIME\n\n if not force_from_server and self._server_time_diff_s is not None:\n # (Calculate using time difference with server taken from previous call)\n result = self._server_time_diff_s + time.time()\n return int(result * 1000) if self.use_milliseconds else result\n\n time_before = time.time()\n\n result = self._send(\"GET\", endpoint, version=version, **kwargs)\n if isinstance(result, Error):\n return result\n\n # (Update time diff)\n self._server_time_diff_s = (result / 1000 if self.use_milliseconds else result) - time_before\n return result\n\n def get_symbols(self, version=None, **kwargs):\n endpoint = Endpoint.SYMBOLS\n return self._send(\"GET\", endpoint, version=version, **kwargs)\n\n def fetch_history(self, endpoint, symbol, limit=None, from_item=None, to_item=None,\n sorting=None, is_use_max_limit=False, from_time=None, to_time=None,\n version=None, **kwargs):\n # Common method for fetching history for any endpoint. 
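\n # Usage sketch for the methods above (a concrete platform subclass and symbol are assumed):\n # client = BinanceRESTClient(version=\"1\")\n # client.get_server_timestamp() # first call hits the server; later calls reuse _server_time_diff_s\n # client.fetch_history(Endpoint.TRADE, \"ETHBTC\", limit=100, sorting=Sorting.ASCENDING)\n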
Used in REST connector.\n\n # (Convert endpoint to history endpoint if they differ)\n history_endpoint_lookup = self.converter.history_endpoint_lookup\n endpoint = history_endpoint_lookup.get(endpoint, endpoint) if history_endpoint_lookup else endpoint\n params = {\n ParamName.SYMBOL: symbol,\n ParamName.LIMIT: limit,\n ParamName.FROM_ITEM: from_item,\n ParamName.TO_ITEM: to_item,\n ParamName.SORTING: sorting,\n ParamName.IS_USE_MAX_LIMIT: is_use_max_limit,\n ParamName.FROM_TIME: from_time,\n ParamName.TO_TIME: to_time,\n }\n\n self.logger.debug(\"fetch_history from: %s to: %s\", from_item or from_time, to_item or to_time)\n result = self._send(\"GET\", endpoint, params, version, **kwargs)\n return result\n\n # Trade\n\n def fetch_trades(self, symbol, limit=None, version=None, **kwargs):\n # Fetch current (last) trades to display at once.\n\n endpoint = Endpoint.TRADE\n params = {\n ParamName.SYMBOL: symbol,\n ParamName.LIMIT: limit,\n }\n\n result = self._send(\"GET\", endpoint, params, version, **kwargs)\n return result\n\n def fetch_trades_history(self, symbol, limit=None, from_item=None, to_item=None,\n sorting=None, is_use_max_limit=False, from_time=None, to_time=None,\n version=None, **kwargs):\n # Fetching whole trades history as much as possible.\n # from_time and to_time used along with from_item and to_item as we often need to fetch\n # history by time and only Binance (as far as I know) doesn't support that (only by id)\n\n return self.fetch_history(Endpoint.TRADE, symbol, limit, from_item, to_item,\n sorting, is_use_max_limit, from_time, to_time,\n version, **kwargs)\n\n # Candle\n\n def fetch_candles(self, symbol, interval, limit=None, from_time=None, to_time=None,\n is_use_max_limit=False, version=None, **kwargs):\n endpoint = Endpoint.CANDLE\n params = {\n ParamName.SYMBOL: symbol,\n ParamName.INTERVAL: interval,\n ParamName.LIMIT: limit,\n ParamName.FROM_TIME: from_time,\n ParamName.TO_TIME: to_time,\n ParamName.IS_USE_MAX_LIMIT: is_use_max_limit,\n }\n\n result = self._send(\"GET\", endpoint, params, version, **kwargs)\n return result\n\n # Ticker\n\n def fetch_ticker(self, symbol=None, version=None, **kwargs):\n endpoint = Endpoint.TICKER\n params = {\n ParamName.SYMBOL: symbol,\n }\n\n result = self._send(\"GET\", endpoint, params, version, **kwargs)\n return result\n\n def fetch_tickers(self, symbols=None, version=None, **kwargs):\n endpoint = Endpoint.TICKER\n # (Send None for all symbols)\n # params = {\n # ParamName.SYMBOLS: None,\n # }\n\n result = self._send(\"GET\", endpoint, None, version, **kwargs)\n\n if symbols:\n # Filter result for symbols defined\n symbols = [symbol.upper() if symbol else symbol for symbol in symbols]\n return [item for item in result if item.symbol in symbols]\n\n return result\n\n # Order Book\n\n def fetch_order_book(self, symbol=None, limit=None, is_use_max_limit=False,\n version=None, **kwargs):\n # Level 2 (price-aggregated) order book for a particular symbol.\n\n endpoint = Endpoint.ORDER_BOOK\n params = {\n ParamName.SYMBOL: symbol,\n ParamName.LIMIT: limit,\n }\n\n result = self._send(\"GET\", endpoint, params, version, **kwargs)\n return result\n\n # def fetch_order_book_L2_L3(self, symbol=None, limit=None, version=None, **kwargs):\n # # Fetch L2/L3 order book (with all orders enlisted) for a particular market trading symbol.\n # pass\n\n\nclass PrivatePlatformRESTClient(PlatformRESTClient):\n\n def __init__(self, api_key=None, api_secret=None, version=None, **kwargs) -> None:\n super().__init__(version=version, **kwargs)\n\n 
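\n # Usage sketch for the public fetch methods above (symbol and interval are illustrative, cf. run_demo.py):\n # client.fetch_trades(\"ETHBTC\", limit=10)\n # client.fetch_candles(\"ETHBTC\", Interval.MIN_1, limit=10)\n # client.fetch_order_book(\"ETHBTC\", limit=5)\n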
self._api_key = api_key\n self._api_secret = api_secret\n\n # Trades\n\n def fetch_account_info(self, version=None, **kwargs):\n # Balance included to account\n endpoint = Endpoint.ACCOUNT\n params = {}\n\n result = self._send(\"GET\", endpoint, params, version or \"3\", **kwargs)\n return result\n\n def fetch_my_trades(self, symbol, limit=None, version=None, **kwargs):\n endpoint = Endpoint.TRADE_MY\n params = {\n ParamName.SYMBOL: symbol,\n ParamName.LIMIT: limit,\n }\n\n result = self._send(\"GET\", endpoint, params, version or \"3\", **kwargs)\n return result\n\n # def fetch_my_trades_history(self, symbol, limit=None, from_item=None, to_item=None,\n # sorting=None, is_use_max_limit=False, version=None, **kwargs):\n # pass\n\n # Order (private)\n\n def create_order(self, symbol, order_type, direction, price=None, amount=None, is_test=False,\n version=None, **kwargs):\n endpoint = Endpoint.ORDER_TEST if is_test else Endpoint.ORDER\n\n # if order_type != OrderType.MARKET:\n # price = None\n params = {\n ParamName.SYMBOL: symbol,\n ParamName.ORDER_TYPE: order_type,\n ParamName.DIRECTION: direction,\n ParamName.PRICE: price if order_type == OrderType.LIMIT else None,\n ParamName.AMOUNT: amount,\n }\n\n result = self._send(\"POST\", endpoint, params, version=version or \"3\", **kwargs)\n return result\n\n def cancel_order(self, order, symbol=None, version=None, **kwargs):\n endpoint = Endpoint.ORDER\n params = {\n # ParamName.ORDER_ID: order.item_id if isinstance(order, Order) else order,\n ParamName.ORDER_ID: order,\n ParamName.SYMBOL: symbol, # move to converter(?): or (order.symbol if hasattr(order, ParamName.SYMBOL) else None),\n }\n\n result = self._send(\"DELETE\", endpoint, params, version or \"3\", **kwargs)\n return result\n\n # was fetch_order\n def check_order(self, order, symbol=None, version=None, **kwargs): # , direction=None\n # item_id should be enough, but some platforms also need symbol and direction\n endpoint = Endpoint.ORDER\n params = {\n ParamName.SYMBOL: symbol,\n ParamName.ORDER_ID: order,\n # ParamName.: ,\n }\n\n result = self._send(\"GET\", endpoint, params, version or \"3\", **kwargs)\n return result\n\n def fetch_orders(self, symbol=None, limit=None, from_item=None, is_open=False,\n version=None, **kwargs): # , order_status=None\n endpoint = Endpoint.ORDER_CURRENT if is_open else Endpoint.ORDER_MY\n params = {\n ParamName.SYMBOL: symbol,\n # ParamName.: ,\n ParamName.LIMIT: limit,\n ParamName.FROM_ITEM: from_item,\n # ParamName.: ,\n }\n\n result = self._send(\"GET\", endpoint, params, version or \"3\", **kwargs)\n return result\n\n\n# WebSocket\n\nclass WSConverter(ProtocolConverter):\n # Main params:\n # False - Subscribing by connecting URL: BitMEX, Binance.\n # True - Subscribing by command: Bitfinex (v1, v2).\n IS_SUBSCRIPTION_COMMAND_SUPPORTED = True\n\n # supported_endpoints = None\n # symbol_endpoints = None # In subclass you can call REST API to get symbols\n supported_endpoints = [Endpoint.TRADE, Endpoint.CANDLE, Endpoint.TICKER, Endpoint.TICKER_ALL,\n Endpoint.ORDER_BOOK, Endpoint.ORDER_BOOK_DIFF]\n symbol_endpoints = [Endpoint.TRADE, Endpoint.CANDLE,\n Endpoint.TICKER, # can be used as symbol and as generic endpoint\n Endpoint.ORDER_BOOK, Endpoint.ORDER_BOOK_DIFF]\n # generic_endpoints = None # = supported_endpoints.difference(symbol_endpoints)\n supported_symbols = None\n\n # Converting info:\n # For converting to platform\n\n # For parsing from platform\n event_type_param = None\n endpoint_by_event_type = None\n item_class_by_endpoint = 
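\n # Order flow sketch for the private client above (symbol, price and amount are hypothetical):\n # order = client.create_order(\"ETHBTC\", OrderType.LIMIT, Direction.BUY, price=\"0.03\", amount=\"1\", is_test=True)\n # client.check_order(order.item_id, symbol=\"ETHBTC\")\n # client.cancel_order(order.item_id, symbol=\"ETHBTC\")\n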
dict(**ProtocolConverter.item_class_by_endpoint, **{\n # # Item class by event type\n # \"error\": Error,\n # \"info\": Info,\n # \"subscribed\": Channel,\n })\n\n # For converting time\n\n @property\n def generic_endpoints(self):\n # Non-symbol endpoints\n return self.supported_endpoints.difference(self.symbol_endpoints or set()) \\\n if self.supported_endpoints else set()\n\n def generate_subscriptions(self, endpoints, symbols, **params):\n result = set()\n for endpoint in endpoints:\n if endpoint in self.symbol_endpoints:\n if symbols:\n for symbol in symbols:\n result.add(self._generate_subscription(endpoint, symbol, **params))\n else:\n result.add(self._generate_subscription(endpoint, None, **params))\n else:\n result.add(self._generate_subscription(endpoint, **params))\n return result\n\n def _generate_subscription(self, endpoint, symbol=None, **params):\n channel = self._get_platform_endpoint(endpoint, {ParamName.SYMBOL: symbol, **params})\n return channel\n\n def parse(self, endpoint, data):\n # (Get endpoint from event type)\n if not endpoint and data and isinstance(data, dict) and self.event_type_param:\n event_type = data.get(self.event_type_param, endpoint)\n endpoint = self.endpoint_by_event_type.get(event_type, event_type) \\\n if self.endpoint_by_event_type else event_type\n # if not endpoint:\n # self.logger.error(\"Cannot find event type by name: %s in data: %s\", self.event_type_param, data)\n # self.logger.debug(\"Endpoint: %s by name: %s in data: %s\", endpoint, self.event_type_param, data)\n\n return super().parse(endpoint, data)\n\n\nclass WSClient(BaseClient):\n \"\"\"\n Using:\n client = WSClient(api_key, api_secret)\n client.subscribe([Endpoint.TRADE], [\"ETHUSD\", \"ETHBTC\"])\n # (Will reconnect for platforms which needed that)\n client.subscribe([Endpoint.TRADE], [\"ETHBTC\", \"ETHUSD\"])\n # Resulting subscriptions: [Endpoint.TRADE] channel for symbols:\n # [\"ETHUSD\", \"ETHBTC\", \"ETHBTC\", \"ETHUSD\"]\n \"\"\"\n # Settings:\n _log_prefix = \"WSClient\"\n\n default_converter_class = WSConverter\n\n is_auto_reconnect = True\n reconnect_delay_sec = 3\n reconnect_count = 3\n\n on_connect = None\n on_data = None\n on_data_item = None\n on_disconnect = None\n\n # State:\n # Subscription sets\n endpoints = None\n symbols = None\n # endpoints + symbols = subscriptions\n current_subscriptions = None\n pending_subscriptions = None\n successful_subscriptions = None\n failed_subscriptions = None\n is_subscribed_with_url = False\n\n # Connection\n is_started = False\n _is_reconnecting = True\n _reconnect_tries = 0\n ws = None\n thread = None\n _data_buffer = None\n\n @property\n def url(self):\n # Override if you need to introduce some get params\n # (Set self.is_subscribed_with_url=True if subscribed in here in URL)\n url, platform_params = self.converter.make_url_and_platform_params()\n return url if self.converter else \"\"\n\n @property\n def is_connected(self):\n return self.ws.sock.connected if self.ws and self.ws.sock else False\n\n def __init__(self, api_key=None, api_secret=None, version=None, **kwargs) -> None:\n super().__init__(version, **kwargs)\n self._api_key = api_key\n self._api_secret = api_secret\n\n # (For convenience)\n self.IS_SUBSCRIPTION_COMMAND_SUPPORTED = self.converter.IS_SUBSCRIPTION_COMMAND_SUPPORTED\n\n # Subscription\n\n def subscribe(self, endpoints=None, symbols=None, **params):\n \"\"\"\n Subscribe and connect.\n\n None means all: all previously subscribed or (if none) all supported.\n\n subscribe() # subscribe to all supported 
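\n # Sketch of how subscriptions are generated (the channel template is hypothetical):\n # with endpoint_lookup = {Endpoint.TRADE: \"trade/{symbol}\"},\n # generate_subscriptions([Endpoint.TRADE], [\"ETHUSD\", \"ETHBTC\"]) # -> {\"trade/ETHUSD\", \"trade/ETHBTC\"}\n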
endpoints (currently only generic ones)\n unsubscribe() # unsubscribe all\n subscribe(symbols=[\"BTCUSD\"]) # subscribe to all supported endpoints for \"BTCUSD\"\n unsubscribe(endpoints=[\"TRADE\"]) # unsubscribe all \"TRADE\" channels - for all symbols\n unsubscribe() # unsubscribe all (except \"TRADE\" which has been already unsubscribed before)\n\n subscribe(endpoints=[\"TRADE\"], symbols=[\"BTCUSD\"]) # subscribe to \"TRADE\" channel for \"BTCUSD\"\n unsubscribe() # unsubscribe all \"TRADE\" channels\n subscribe() # subscribe to all \"TRADE\" channels back because they were subscribed directly\n unsubscribe(endpoints=[\"TRADE\"]) # unsubscribe all \"TRADE\" channels directly (currently only for \"BTCUSD\")\n subscribe() # subscribe all supported channels for symbol \"BTCUSD\" (as this symbol wasn't unsubscribed directly)\n unsubscribe(symbols=[\"BTCUSD\"]) # unsubscribe all channels for \"BTCUSD\"\n\n :param endpoints:\n :param symbols:\n :return:\n \"\"\"\n\n self.logger.debug(\"Subscribe on endpoints: %s and symbols: %s prev: %s %s\",\n endpoints, symbols, self.endpoints, self.symbols)\n # if not endpoints and not symbols:\n # subscriptions = self.prev_subscriptions\n # else:\n if not endpoints:\n endpoints = self.endpoints or self.converter.supported_endpoints\n else:\n endpoints = set(endpoints).intersection(self.converter.supported_endpoints)\n self.endpoints = self.endpoints.union(endpoints) if self.endpoints else endpoints\n if not symbols:\n symbols = self.symbols or self.converter.supported_symbols\n else:\n self.symbols = self.symbols.union(symbols) if self.symbols else set(symbols)\n if not endpoints:\n return\n\n subscriptions = self.converter.generate_subscriptions(endpoints, symbols, **params)\n\n self.current_subscriptions = self.current_subscriptions.union(subscriptions) \\\n if self.current_subscriptions else subscriptions\n\n self._subscribe(subscriptions)\n\n def unsubscribe(self, endpoints=None, symbols=None, **params):\n # None means \"all\"\n\n self.logger.debug(\"Unsubscribe from endpoints: %s and symbols: %s\", endpoints, symbols)\n subscribed = self.pending_subscriptions.union(self.successful_subscriptions or set()) \\\n if self.pending_subscriptions else set()\n if not endpoints and not symbols:\n subscriptions = self.current_subscriptions.copy()\n\n # if self.current_subscriptions:\n # self.prev_subscriptions = self.current_subscriptions\n self.current_subscriptions.clear()\n self.failed_subscriptions.clear()\n self.pending_subscriptions.clear()\n self.successful_subscriptions.clear()\n else:\n if not endpoints:\n endpoints = self.endpoints\n else:\n self.endpoints = self.endpoints.difference(endpoints) if self.endpoints else set()\n if not symbols:\n symbols = self.symbols\n else:\n self.symbols = self.symbols.difference(symbols) if self.symbols else set()\n if not endpoints:\n return\n\n subscriptions = self.converter.generate_subscriptions(endpoints, symbols, **params)\n\n self.current_subscriptions = self.current_subscriptions.difference(subscriptions)\n self.failed_subscriptions = self.failed_subscriptions.difference(subscriptions)\n self.pending_subscriptions = self.pending_subscriptions.difference(subscriptions)\n self.successful_subscriptions = self.successful_subscriptions.difference(subscriptions)\n\n self._unsubscribe(subscriptions.intersection(subscribed))\n\n def resubscribe(self):\n self.logger.debug(\"Resubscribe all current subscriptions\")\n # Unsubscribe & subscribe all\n if self.IS_SUBSCRIPTION_COMMAND_SUPPORTED:\n # Send unsubscribe all and 
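\n # Worked example for the set logic above: after subscribe(endpoints=[Endpoint.TRADE], symbols=[\"ETHUSD\"])\n # and then subscribe(symbols=[\"ETHBTC\"]), self.symbols == {\"ETHUSD\", \"ETHBTC\"} and both symbols are\n # subscribed for TRADE; unsubscribe(symbols=[\"ETHUSD\"]) afterwards removes only the ETHUSD channels.\n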
subscribe all back again not interrupting a connection\n self.unsubscribe()\n self.subscribe()\n else:\n # Platforms which subscribe in WS URL need reconnection\n self.reconnect()\n\n def _subscribe(self, subscriptions):\n # Call subscribe command with \"subscriptions\" param or reconnect with\n # \"self.current_subscriptions\" in URL - depending on platform\n self.logger.debug(\" Subscribe to subscriptions: %s\", subscriptions)\n if not self.is_started or not self.IS_SUBSCRIPTION_COMMAND_SUPPORTED:\n # Connect on first subscribe() or reconnect on the further ones\n self.reconnect()\n else:\n self._send_subscribe(subscriptions)\n\n def _unsubscribe(self, subscriptions):\n # Call unsubscribe command with \"subscriptions\" param or reconnect with\n # \"self.current_subscriptions\" in URL - depending on platform\n self.logger.debug(\" Unsubscribe from subscriptions: %s\", subscriptions)\n if not self.is_started or not self.IS_SUBSCRIPTION_COMMAND_SUPPORTED:\n self.reconnect()\n else:\n self._send_unsubscribe(subscriptions)\n\n def _send_subscribe(self, subscriptions):\n # Implement in subclass\n pass\n\n def _send_unsubscribe(self, subscriptions):\n # Implement in subclass\n pass\n\n # Connection\n\n def connect(self, version=None):\n # Check ready\n if not self.current_subscriptions:\n self.logger.warning(\"Please subscribe before connecting.\")\n return\n\n # Do nothing if was called before\n if self.ws and self.is_started:\n self.logger.warning(\"WebSocket is already started.\")\n return\n\n # Connect\n if not self.ws:\n self.ws = WebSocketApp(self.url, header=self.headers,\n on_open=self._on_open,\n on_message=self._on_message,\n on_error=self._on_error,\n on_close=self._on_close)\n else:\n self.ws.url = self.url\n self.ws.header = self.headers\n\n # (run_forever() will raise an exception if previous socket is still not closed)\n self.logger.debug(\"Start WebSocket with url: %s\" % self.ws.url)\n self.is_started = True\n self.thread = Thread(target=self.ws.run_forever)\n self.thread.daemon = True\n self.thread.start()\n\n def reconnect(self):\n self.logger.debug(\"Reconnect WebSocket\")\n self.close()\n self.connect()\n\n def close(self):\n if not self.is_started:\n # Nothing to close\n return\n\n self.logger.debug(\"Close WebSocket\")\n # (If called directly or from _on_close())\n self.is_started = False\n if self.is_connected:\n # (If called directly)\n self.ws.close()\n\n super().close()\n\n def _on_open(self):\n self.logger.debug(\"On open. %s\", \"Connected.\" if self.is_connected else \"NOT CONNECTED. It's impossible!\")\n\n # (Stop reconnecting)\n self._is_reconnecting = False\n self._reconnect_tries = 0\n\n if self.on_connect:\n self.on_connect()\n\n # Subscribe by command on connect\n if self.IS_SUBSCRIPTION_COMMAND_SUPPORTED and not self.is_subscribed_with_url:\n self.subscribe()\n\n def _on_message(self, message):\n self.logger.debug(\"On message: %s\", message[:200])\n # str -> json\n try:\n data = json.loads(message)\n except json.JSONDecodeError:\n self.logger.error(\"Wrong JSON is received! Skipped. 
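\n # Usage sketch (mirrors run_demo.py; the concrete subclass and symbols are assumptions):\n # client = BinanceWSClient(version=\"1\")\n # client.on_data_item = print # called for every parsed item\n # client.subscribe(endpoints=[Endpoint.TRADE], symbols=[\"ETHUSD\"]) # connects on the first subscribe()\n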
message: %s\", message)\n return\n\n # json -> items\n result = self._parse(None, data)\n\n # Process items\n self._data_buffer = []\n\n if result and isinstance(result, list):\n for item in result:\n self.on_item_received(item)\n else:\n self.on_item_received(result)\n\n if self.on_data and self._data_buffer:\n self.on_data(self._data_buffer)\n\n def _parse(self, endpoint, data):\n if data and isinstance(data, list):\n return [self.converter.parse(endpoint, data_item) for data_item in data]\n return self.converter.parse(endpoint, data)\n\n def on_item_received(self, item):\n # To skip empty and unparsed data\n if self.on_data_item and isinstance(item, DataObject):\n self.on_data_item(item)\n self._data_buffer.append(item)\n\n def _on_error(self, error_exc):\n self.logger.exception(\"On error exception from websockets: %s\", error_exc)\n pass\n\n def _on_close(self):\n self.logger.info(\"On WebSocket close\")\n\n if self.on_disconnect:\n self.on_disconnect()\n\n if self.is_started or (self._is_reconnecting and self._reconnect_tries < self.reconnect_count):\n self._is_reconnecting = True\n if self._reconnect_tries == 0:\n # Don't wait before the first reconnection try\n time.sleep(self.reconnect_delay_sec)\n self._reconnect_tries += 1\n self.reconnect()\n return\n self._is_reconnecting = False\n\n self.close()\n\n def _send(self, data):\n if not data:\n return\n\n message = json.dumps(data)\n self.logger.debug(\"Send message: %s\", message)\n self.ws.send(message)\n\n # Processing\n" }, { "alpha_fraction": 0.7073954939842224, "alphanum_fraction": 0.7188791632652283, "avg_line_length": 31.014705657958984, "blob_id": "955f8d4b6f3dbcad5fa9da3c1fca54846faf5952", "content_id": "fda3a065f53fd3b8270a167ad33cea4a6153a115", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2248, "license_type": "permissive", "max_line_length": 114, "num_lines": 68, "path": "/hyperquant/clients/tests/test_bitmex.py", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "from hyperquant.api import Platform\nfrom hyperquant.clients.bitmex import BitMEXRESTConverterV1, BitMEXRESTClient, BitMEXWSClient, BitMEXWSConverterV1\nfrom hyperquant.clients.tests.test_init import TestRESTClient, TestWSClient, TestConverter, TestRESTClientHistory\n\n\n# TODO check https://www.bitmex.com/app/restAPI \"Обратите внимание: все суммы в биткойнах при\n# возврате запроса указываются в Satoshi: 1 XBt (Satoshi) = 0,00000001 XBT (биткойн).\"\n\n# REST\n\nclass TestBitMEXRESTConverterV1(TestConverter):\n converter_class = BitMEXRESTConverterV1\n\n\nclass TestBitMEXRESTClientV1(TestRESTClient):\n platform_id = Platform.BITMEX\n version = \"1\"\n # testing_symbol = \"XBTUSD\"\n testing_symbol = None # BitMEX returns all symbols if symbol param is not specified\n testing_symbol2 = \"XBTUSD\"\n\n is_sorting_supported = True\n\n has_limit_error = True\n is_symbol_case_sensitive = True\n\n\nclass TestBitMEXRESTClientHistoryV1(TestRESTClientHistory):\n platform_id = Platform.BITMEX\n version = \"1\"\n # testing_symbol = \"XBTUSD\"\n testing_symbol = None # BitMEX returns all symbols if symbol param is not specified\n testing_symbol2 = \"XBTUSD\"\n\n is_sorting_supported = True\n\n has_limit_error = True\n is_symbol_case_sensitive = True\n\n def test_fetch_trades_errors(self, method_name=\"fetch_trades\", is_auth=False):\n client = self.client_authed if is_auth else self.client\n\n # Wrong symbol\n result = getattr(client, method_name)(self.wrong_symbol)\n\n # 
Empty list instead of error (todo check, may be we should create error for each empty list returned)\n self.assertEqual(result, [])\n\n if self.is_symbol_case_sensitive:\n # Symbol in lower case as wrong symbol\n result = getattr(client, method_name)(self.testing_symbol2.lower())\n\n self.assertIsNotNone(result)\n self.assertEqual(result, [])\n\n\n# WebSocket\n\nclass TestBitMEXWSConverterV1(TestConverter):\n converter_class = BitMEXWSConverterV1\n\n\nclass TestBitMEXWSClientV1(TestWSClient):\n platform_id = Platform.BITMEX\n version = \"1\"\n\n testing_symbol = \"XBTUSD\"\n testing_symbols = [\"ETHUSD\", \"XBTUSD\"]\n" }, { "alpha_fraction": 0.6159995198249817, "alphanum_fraction": 0.6225722432136536, "avg_line_length": 29.81751823425293, "blob_id": "485f4feafd639d67a886597f164f607a52fa6066", "content_id": "d0c52c65c51a3c569bbfddd9d75dd6c8435ca27d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17045, "license_type": "permissive", "max_line_length": 127, "num_lines": 548, "path": "/hyperquant/api.py", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "from collections import Iterable\nfrom decimal import Decimal\n\nfrom clickhouse_driver.errors import ServerException\nfrom dateutil import parser\nfrom django.http import JsonResponse\n\n\"\"\"\nCommon out API format is defined here.\n\nWhen we calling any other platform API like Binance or Bitfinex we convert \nall response data to this format. \nWhen anyone calling our REST API this format is used too.\n\"\"\"\n\n\n# Trading platforms, REST API, and DB:\n\n# Constants\n\nclass Platform:\n BINANCE = 1\n BITFINEX = 2\n BITMEX = 3\n\n name_by_id = {\n 1: \"BINANCE\",\n 2: \"BITFINEX\",\n 3: \"BITMEX\",\n }\n id_by_name = {v: k for k, v in name_by_id.items()}\n\n @classmethod\n def get_platform_name_by_id(cls, platform_id):\n return cls.name_by_id.get(platform_id)\n\n @classmethod\n def get_platform_id_by_name(cls, platform, is_check_valid_id=False):\n # platform - name or id, all other values will be converted to None\n if isinstance(platform, str) and platform.isnumeric():\n platform = int(platform)\n return cls.id_by_name.get(str(platform).upper(),\n platform if not is_check_valid_id or platform in cls.name_by_id else None)\n\n\nclass Endpoint:\n # Note: you can use any value, but remember they will be used in all our APIs,\n # and they must differ from each other\n\n # ALL = \"*\" # Used by WS Client\n\n # For all platforms and our REST API (except *_HISTORY)\n PING = \"ping\"\n SERVER_TIME = \"time\"\n SYMBOLS = \"symbols\"\n TRADE = \"trade\"\n TRADE_HISTORY = \"trade/history\"\n TRADE_MY = \"trade/my\" # Private\n CANDLE = \"candle\"\n # CANDLE_HISTORY = \"candle/history\"\n TICKER = \"ticker\"\n TICKER_ALL = \"ticker_all\"\n # TICKER_HISTORY = \"ticker/history\"\n ORDER_BOOK = \"orderbook\"\n # ORDER_BOOK_HISTORY = \"orderbook/history\"\n ORDER_BOOK_DIFF = \"orderbook\" # WS\n\n # Private\n ACCOUNT = \"account\"\n ORDER = \"order\"\n ORDER_TEST = \"order/test\"\n ORDER_CURRENT = \"order/current\"\n ORDER_MY = \"order/my\"\n # ORDER_HISTORY = \"order/history\"\n\n # For our REST API only\n ITEM = \"\"\n HISTORY = \"/history\"\n FORMAT = \"/format\"\n\n ALL = [SERVER_TIME, SYMBOLS, TRADE, TRADE_HISTORY, TRADE_MY, CANDLE, TICKER, TICKER_ALL,\n ORDER_BOOK, ORDER_BOOK_DIFF, ACCOUNT, ORDER, ORDER_TEST, ORDER_CURRENT, ORDER_MY,\n ITEM, HISTORY, FORMAT]\n\n\nclass ParamName:\n # Stores names which are used:\n # 1. 
in params of client.send() method;\n # 2. in value object classes!;\n # 3. field names in DB;\n # 4. in our REST APIs.\n\n ID = \"id\"\n ITEM_ID = \"item_id\"\n TRADE_ID = \"trade_id\"\n ORDER_ID = \"order_id\"\n USER_ORDER_ID = \"user_order_id\"\n\n SYMBOL = \"symbol\"\n SYMBOLS = \"symbols\" # For our REST API only\n LIMIT = \"limit\"\n IS_USE_MAX_LIMIT = \"is_use_max_limit\" # used in clients only\n LIMIT_SKIP = \"limit_skip\"\n PAGE = \"page\" # instead of LIMIT_SKIP\n SORTING = \"sorting\"\n INTERVAL = \"interval\"\n DIRECTION = \"direction\" # Sell/buy or ask/bid\n ORDER_TYPE = \"order_type\"\n ORDER_STATUS = \"order_status\"\n LEVEL = \"level\" # For order book (WS)\n TRADES_COUNT = \"trades_count\"\n\n TIMESTAMP = \"timestamp\"\n FROM_ITEM = \"from_item\"\n TO_ITEM = \"to_item\"\n FROM_TIME = \"from_time\"\n TO_TIME = \"to_time\"\n FROM_PRICE = \"from_price\"\n TO_PRICE = \"to_price\"\n FROM_AMOUNT = \"from_amount\"\n TO_AMOUNT = \"to_amount\"\n\n PRICE_OPEN = \"price_open\"\n PRICE_CLOSE = \"price_close\"\n PRICE_HIGH = \"price_high\"\n PRICE_LOW = \"price_low\"\n PRICE = \"price\"\n AMOUNT_ORIGINAL = \"amount_original\"\n AMOUNT_EXECUTED = \"amount_executed\"\n AMOUNT_AVAILABLE = \"amount_available\"\n AMOUNT_RESERVED = \"amount_reserved\"\n AMOUNT = \"amount\"\n FEE = \"fee\"\n REBATE = \"rebate\"\n BALANCES = \"balances\"\n ASKS = \"asks\"\n BIDS = \"bids\"\n\n # For our REST API only\n PLATFORM_ID = \"platform_id\"\n PLATFORM = \"platform\" # (alternative)\n PLATFORMS = \"platforms\" # (alternative)\n # ENDPOINT = \"endpoint\"\n\n IS_SHORT = \"is_short\"\n\n ALL = [ID, ITEM_ID, TRADE_ID, ORDER_ID, USER_ORDER_ID,\n LIMIT, IS_USE_MAX_LIMIT, LIMIT_SKIP, PAGE, SORTING,\n SYMBOL, SYMBOLS, DIRECTION, INTERVAL, ORDER_TYPE, LEVEL,\n TIMESTAMP, FROM_ITEM, TO_ITEM, FROM_TIME, TO_TIME,\n FROM_PRICE, TO_PRICE, FROM_AMOUNT, TO_AMOUNT,\n PRICE_OPEN, PRICE_CLOSE, PRICE_HIGH, PRICE_LOW, PRICE,\n AMOUNT_ORIGINAL, AMOUNT_EXECUTED, AMOUNT, FEE, REBATE, BIDS, ASKS,\n PLATFORM_ID, PLATFORM, PLATFORMS, IS_SHORT]\n\n _timestamp_names = (TIMESTAMP, FROM_TIME, TO_TIME)\n _decimal_names = (PRICE, FROM_PRICE, TO_PRICE, AMOUNT, FROM_AMOUNT, TO_AMOUNT)\n\n @classmethod\n def is_timestamp(cls, name):\n return name in cls._timestamp_names\n\n @classmethod\n def is_decimal(cls, name):\n return name in cls._decimal_names\n\n\nclass ParamValue:\n # todo: remove at some point\n # param_names = [ParamName.SORTING]\n\n # For limit\n MIN = \"min\"\n MAX = \"max\"\n\n ALL = \"all\"\n UNDEFINED = None\n\n\nclass Sorting:\n ASCENDING = \"asc\" # Oldest first\n DESCENDING = \"desc\" # Newest first, usually default\n DEFAULT_SORTING = \"default_sorting\" # (For internal uses only)\n\n\nclass Interval:\n # For candles\n\n MIN_1 = \"1m\"\n MIN_3 = \"3m\"\n MIN_5 = \"5m\"\n MIN_15 = \"15m\"\n MIN_30 = \"30m\"\n HRS_1 = \"1h\"\n HRS_2 = \"2h\"\n HRS_4 = \"4h\"\n HRS_6 = \"6h\"\n HRS_8 = \"8h\"\n HRS_12 = \"12h\"\n DAY_1 = \"1d\"\n DAY_3 = \"3d\"\n WEEK_1 = \"1w\"\n MONTH_1 = \"1M\"\n\n ALL = [MIN_1, MIN_3, MIN_5, MIN_15, MIN_30,\n HRS_1, HRS_2, HRS_4, HRS_6, HRS_8, HRS_12,\n DAY_1, DAY_3, WEEK_1, MONTH_1]\n\n\nclass Direction:\n # (trade, order)\n\n SELL = 1\n BUY = 2\n # (for our REST API as alternative values)\n SELL_NAME = \"sell\"\n BUY_NAME = \"buy\"\n\n name_by_value = {\n SELL: SELL_NAME,\n BUY: BUY_NAME,\n }\n value_by_name = {v: k for k, v in name_by_value.items()}\n\n @classmethod\n def get_direction_value(cls, direction, is_check_valid_id=True):\n # (value_by_name keys are lowercase, so lower() is needed here, cf. parse_direction() below)\n return cls.value_by_name.get(str(direction).lower(),\n direction if 
not is_check_valid_id or direction in cls.name_by_value else None)\n\n\nclass OrderBookDirection:\n # Direction for order book (same as sell/buy but with different names)\n ASK = 1 # Same as sell\n BID = 2 # Same as buy\n # (for our REST API as alternative values)\n ASK_NAME = \"ask\"\n BID_NAME = \"bid\"\n\n name_by_value = {\n ASK: ASK_NAME,\n BID: BID_NAME,\n }\n value_by_name = {v: k for k, v in name_by_value.items()}\n\n\nclass OrderType:\n LIMIT = 1\n MARKET = 2\n # (for our REST API)\n LIMIT_NAME = \"limit\"\n MARKET_NAME = \"market\"\n\n name_by_value = {\n LIMIT: LIMIT_NAME,\n MARKET: MARKET_NAME,\n }\n value_by_name = {v: k for k, v in name_by_value.items()}\n\n\nclass OrderStatus:\n OPEN = 1\n CLOSED = 0\n\n NEW = 2\n PARTIALLY_FILLED = 3\n FILLED = 4\n # PENDING_CANCEL = 5\n CANCELED = 6\n REJECTED = 7\n EXPIRED = 8\n\n # (for our REST API)\n OPEN_NAME = \"open\"\n CLOSED_NAME = \"closed\"\n\n NEW_NAME = \"new\"\n PARTIALLY_FILLED_NAME = \"partially_filled\"\n FILLED_NAME = \"filled\"\n # PENDING_CANCEL_NAME = \"pending_cancel\"\n CANCELED_NAME = \"canceled\"\n REJECTED_NAME = \"rejected\"\n EXPIRED_NAME = \"expired\"\n\n name_by_value = {\n OPEN: OPEN_NAME,\n CLOSED: CLOSED_NAME,\n\n NEW: NEW_NAME,\n PARTIALLY_FILLED: PARTIALLY_FILLED_NAME,\n FILLED: FILLED_NAME,\n # PENDING_CANCEL: PENDING_CANCEL_NAME,\n CANCELED: CANCELED_NAME,\n REJECTED: REJECTED_NAME,\n EXPIRED: EXPIRED_NAME,\n }\n value_by_name = {v: k for k, v in name_by_value.items()}\n\n\nclass ErrorCode:\n # Provides the same error codes and messages for all trading platforms\n\n # We need to accumulate enough error types, systematize them and give them numeric codes\n # that are easy to remember mnemonically, so that later an error can easily be identified by its code\n UNAUTHORIZED = \"any1\"\n RATE_LIMIT = \"any:ratelim\"\n IP_BAN = \"any:ipban\"\n WRONG_SYMBOL = \"any:wrsymbol\"\n WRONG_LIMIT = \"any:wrlimit\"\n WRONG_PARAM = \"any:wrparval\"\n APP_ERROR = \"any:apperr\"\n APP_DB_ERROR = \"any:appdberr\"\n\n message_by_code = {\n UNAUTHORIZED: \"Unauthorized. Maybe api_key or api_secret is wrong or not defined at all.\",\n RATE_LIMIT: \"Rate limit reached. We must make a delay for a while.\",\n WRONG_SYMBOL: \"Wrong symbol. Maybe this symbol is not supported by the platform or its name is wrong.\",\n WRONG_LIMIT: \"Wrong limit. Maybe it is too big.\",\n WRONG_PARAM: \"Wrong param value.\",\n APP_ERROR: \"App error!\",\n APP_DB_ERROR: \"App error! 
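\n # Worked example for get_message_by_code() below:\n # ErrorCode.get_message_by_code(ErrorCode.RATE_LIMIT) # -> \"Rate limit reached. ...\"\n # ErrorCode.get_message_by_code(\"unknown\", default=\"?\") # -> \"?\"\n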
It's likely that app made wrong request to DB.\",\n }\n\n @classmethod\n def get_message_by_code(cls, code, default=None, **kwargs):\n return cls.message_by_code[code].format_map(kwargs) if code in cls.message_by_code else default or \"(no message: todo)\"\n\n\n# For DB, REST API\nitem_format_by_endpoint = {\n Endpoint.TRADE: [\n ParamName.PLATFORM_ID, ParamName.SYMBOL, ParamName.TIMESTAMP, ParamName.ITEM_ID,\n ParamName.PRICE, ParamName.AMOUNT, ParamName.DIRECTION\n ],\n}\n\n\n# REST API:\n\n# Parse request\n\ndef parse_platform_id(params):\n param_names = [ParamName.PLATFORM, ParamName.PLATFORMS, ParamName.PLATFORM_ID]\n for name in param_names:\n value = params.get(name)\n if value:\n return _convert_platform_id(value)\n return None\n\n\ndef parse_platform_ids(params):\n platforms = params.get(ParamName.PLATFORMS, None) or params.get(ParamName.PLATFORM)\n platforms = platforms.split(\",\") if isinstance(platforms, str) else platforms\n return [_convert_platform_id(p) for p in platforms] if platforms else None\n\n\ndef _convert_platform_id(platform):\n if platform is None:\n return None\n return int(platform) if platform.isnumeric() else Platform.id_by_name.get(platform.upper())\n\n\ndef parse_symbols(params):\n # None -> None\n # \"xxxzzz,yyyZZZ\" -> [\"XXXZZZ\", \"YYYZZZ\"]\n symbols = params.get(ParamName.SYMBOLS) or params.get(ParamName.SYMBOL)\n if symbols is None:\n return None\n return symbols.upper().split(\",\") if isinstance(symbols, str) else symbols\n\n\ndef parse_direction(params):\n # None -> None\n # \"Sell\" -> 1\n # \"BUY\" -> 2\n direction = params.get(ParamName.DIRECTION)\n if direction is None:\n return None\n direction = int(direction) if direction.isnumeric() else \\\n Direction.value_by_name.get(direction.lower())\n return direction if direction in (Direction.SELL, Direction.BUY) else None\n\n\ndef parse_timestamp(params, name):\n # Any time value to Unix timestamp in seconds\n time = params.get(name)\n if time is None:\n return None\n if time.isnumeric():\n return int(time)\n try:\n return float(time)\n except ValueError:\n return parser.parse(time).timestamp()\n\n\ndef parse_decimal(params, name):\n value = params.get(name)\n return Decimal(str(value)) if value is not None else None\n\n\ndef parse_limit(params, DEFAULT_LIMIT, MIN_LIMIT, MAX_LIMIT):\n limit = int(params.get(ParamName.LIMIT, DEFAULT_LIMIT))\n return min(MAX_LIMIT, max(MIN_LIMIT, limit))\n\n\ndef parse_sorting(params, DEFAULT_SORTING):\n sorting = params.get(ParamName.SORTING, DEFAULT_SORTING)\n return sorting\n # sorting = params.get(ParamName.SORTING)\n # # (Any wrong value treated as default)\n # is_descending = sorting == (Sorting.ASCENDING if DEFAULT_SORTING == Sorting.DESCENDING else Sorting.DESCENDING)\n # return Sorting.DESCENDING if is_descending else Sorting.ASCENDING\n\n\ndef sort_from_to_params(from_value, to_value):\n # Swap if from_value > to_value\n return (to_value, from_value) if from_value is not None and to_value is not None \\\n and from_value > to_value else (from_value, to_value)\n\n\n# Prepare response\n\ndef make_data_response(data, item_format, is_convert_to_list=True):\n result = None\n if data:\n if isinstance(data, Exception):\n return make_error_response(exception=data)\n\n if not isinstance(data, list) or not isinstance(data[0], list):\n # {\"param1\": \"prop1\", \"param2\": \"prop2\"} -> [{\"param1\": \"prop1\", \"param2\": \"prop2\"}]\n # [\"prop1\", \"prop2\"] -> [[\"prop1\", \"prop2\"]]\n data = [data]\n\n if isinstance(data[0], list):\n # [[\"prop1\", \"prop2\"], 
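\n# Worked examples for the request-parsing helpers above (inputs are illustrative):\n# parse_platform_ids({ParamName.PLATFORMS: \"1,BITFINEX\"}) # -> [1, 2]\n# parse_timestamp({ParamName.FROM_TIME: \"1514764800\"}, ParamName.FROM_TIME) # -> 1514764800\n# parse_direction({ParamName.DIRECTION: \"Sell\"}) # -> Direction.SELL == 1\n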
[\"prop1\", \"prop2\"]] -> same\n result = data if is_convert_to_list else convert_items_list_to_dict(data, item_format)\n elif isinstance(data[0], dict):\n # [{\"param1\": \"prop1\", \"param2\": \"prop2\"}] -> [[\"prop1\", \"prop2\"]]\n result = convert_items_dict_to_list(data, item_format) if is_convert_to_list else data\n # elif isinstance(data[0], DataObject):\n else:\n result = convert_items_obj_to_list(data, item_format) if is_convert_to_list else \\\n convert_items_obj_to_dict(data, item_format)\n\n return JsonResponse({\n \"data\": result if result else [],\n })\n\n\ndef make_error_response(error_code=None, exception=None, **kwargs):\n if not error_code and exception:\n if isinstance(exception, ServerException):\n error_code = ErrorCode.APP_DB_ERROR\n else:\n error_code = ErrorCode.APP_ERROR\n\n return JsonResponse({\"error\": {\n \"code\": error_code,\n \"message\": ErrorCode.get_message_by_code(error_code, **kwargs)\n }})\n\n\ndef make_format_response(item_format):\n values = {\n ParamName.PLATFORM_ID: Platform.name_by_id,\n # ParamName.PLATFORM: Platform.name_by_id,\n ParamName.DIRECTION: Direction.name_by_value,\n }\n return JsonResponse({\n \"item_format\": item_format,\n \"values\": {k: v for k, v in values.items() if k in item_format},\n\n \"example_item\": {\"data\": [[name + \"X\" for name in item_format]]},\n # \"example_item\": {\"data\": [name + \"X\" for name in item_format]}, # ?\n \"example_history\": {\"data\": [[name + str(i) for name in item_format] for i in range(3)]},\n \"example_error\": {\"error\": {\"code\": 1, \"message\": \"Error description.\"}},\n })\n\n\n# Utility:\n\n# Convert items\n\ndef convert_items_obj_to_list(item_or_items, item_format):\n if not item_or_items:\n return item_or_items\n return _convert_item_or_items_with_fun(item_or_items, item_format, _convert_items_obj_to_list)\n\n\ndef convert_items_dict_to_list(item_or_items, item_format):\n if not item_or_items:\n return item_or_items\n return _convert_item_or_items_with_fun(item_or_items, item_format, _convert_items_dict_to_list)\n\n\ndef convert_items_list_to_dict(item_or_items, item_format):\n if not item_or_items:\n return item_or_items\n return _convert_item_or_items_with_fun(item_or_items, item_format, _convert_items_list_to_dict)\n\n\ndef convert_items_obj_to_dict(item_or_items, item_format):\n if not item_or_items:\n return item_or_items\n return _convert_item_or_items_with_fun(item_or_items, item_format, _convert_items_obj_to_dict)\n\n\ndef _convert_item_or_items_with_fun(item_or_items, item_format, fun):\n # Input item - output item,\n # input items - output items\n if not item_format:\n raise Exception(\"item_format cannot be None!\")\n\n is_list = isinstance(item_or_items, (list, tuple))\n if is_list:\n for element in item_or_items:\n if element:\n # Check the first not None element is not an item\n # (list, dict (iterable but not a str) or object (has __dict__))\n if isinstance(element, str) or not isinstance(element, Iterable) and \\\n not hasattr(element, \"__dict__\"):\n is_list = False\n break\n items = item_or_items if is_list else [item_or_items]\n # Convert\n result = fun(items, item_format) if items else []\n return result if is_list else result[0]\n\n\ndef _convert_items_obj_to_list(items, item_format):\n return [[getattr(item, p) for p in item_format if hasattr(item, p)] if item is not None else None\n for item in items] if items else []\n\n\ndef _convert_items_dict_to_list(items, item_format):\n return [[item[p] for p in item_format if p in item] if item is not None 
else None\n for item in items] if items else []\n\n\ndef _convert_items_list_to_dict(items, item_format):\n index_property_list = list(enumerate(item_format))\n return [{p: item[i] for i, p in index_property_list if i < len(item)} if item is not None else None\n for item in items] if items else []\n\n\ndef _convert_items_obj_to_dict(items, item_format):\n return [{p: getattr(item, p) for p in item_format if hasattr(item, p)} if item is not None else None\n for item in items] if items else []\n" }, { "alpha_fraction": 0.6810919046401978, "alphanum_fraction": 0.6817576289176941, "avg_line_length": 39.040000915527344, "blob_id": "a729f04a1ab6a33e47cd084d5187da9c951334be", "content_id": "063b7b5317513624dd63260aac2ac3f5ee0457ef", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3004, "license_type": "permissive", "max_line_length": 117, "num_lines": 75, "path": "/hyperquant/clients/tests/test_utils.py", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "import unittest\n\nfrom hyperquant.api import Platform\nfrom hyperquant.clients.binance import BinanceRESTClient, BinanceWSClient\nfrom hyperquant.clients.bitfinex import BitfinexRESTClient, BitfinexWSClient\nfrom hyperquant.clients.bitmex import BitMEXRESTClient, BitMEXWSClient\nfrom hyperquant.clients.utils import create_rest_client, create_ws_client\n\n\nclass TestCreateClient(unittest.TestCase):\n\n def test_create_rest_client(self):\n self._test_create_client()\n\n def test_create_ws_client(self):\n self._test_create_client(False)\n\n def test_create_rest_client_private(self):\n self._test_create_client(is_private=True)\n\n def test_create_ws_client_private(self):\n self._test_create_client(False, is_private=True)\n\n def _test_create_client(self, is_rest=True, is_private=False):\n create_client = create_rest_client if is_rest else create_ws_client\n\n # Binance\n client = create_client(Platform.BINANCE, is_private)\n\n self.assertIsInstance(client, BinanceRESTClient if is_rest else BinanceWSClient)\n self.assertEqual(client.version, BinanceRESTClient.version)\n if not is_private:\n self.assertIsNotNone(client._api_key,\n \"For Binance, api_key must be set even for public API (for historyTrades endponit)\")\n self.assertIsNone(client._api_secret)\n else:\n self.assertIsNotNone(client._api_key)\n self.assertIsNotNone(client._api_secret)\n\n # Bitfinex\n client = create_client(Platform.BITFINEX, is_private)\n\n self.assertIsInstance(client, BitfinexRESTClient if is_rest else BitfinexWSClient)\n self.assertEqual(client.version, BitfinexRESTClient.version)\n if not is_private:\n self.assertIsNone(client._api_key)\n self.assertIsNone(client._api_secret)\n else:\n self.assertIsNotNone(client._api_key)\n self.assertIsNotNone(client._api_secret)\n\n # Testing version\n client = create_client(Platform.BITFINEX, is_private, version=\"1\")\n\n self.assertIsInstance(client, BitfinexRESTClient if is_rest else BitfinexWSClient)\n self.assertEqual(client.version, \"1\")\n self.assertNotEqual(client.version, BitfinexRESTClient.version)\n if not is_private:\n self.assertIsNone(client._api_key)\n self.assertIsNone(client._api_secret)\n else:\n self.assertIsNotNone(client._api_key)\n self.assertIsNotNone(client._api_secret)\n\n # BitMEX\n client = create_client(Platform.BITMEX, is_private)\n\n self.assertIsInstance(client, BitMEXRESTClient if is_rest else BitMEXWSClient)\n self.assertEqual(client.version, BitMEXRESTClient.version)\n if not is_private:\n 
self.assertIsNone(client._api_key)\n self.assertIsNone(client._api_secret)\n else:\n self.assertIsNotNone(client._api_key)\n self.assertIsNotNone(client._api_secret)\n\n" }, { "alpha_fraction": 0.7066051959991455, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 30.7560977935791, "blob_id": "80384ffba57441dfbdd24654ad24a2bb922d69bc", "content_id": "03c1ae25d4a6daca6dcd8bd630b396a3f6195b4c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1302, "license_type": "permissive", "max_line_length": 100, "num_lines": 41, "path": "/run_demo.py", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "import time\n\nfrom django.conf import settings\n\nimport settings as hqlib_settings\nfrom hyperquant.api import Interval\nfrom hyperquant.clients import (\n utils, Endpoint, Platform\n)\nfrom hyperquant.clients.tests.utils import set_up_logging\n\nsettings.configure(DEBUG=True, default_settings=hqlib_settings)\n\n# Enable logging if needed\n# set_up_logging()\n\n\n# Change to Platform.BINANCE to see example\nTEST_PLATFORM = Platform.BINANCE\n\nTEST_SYMBOLS = {\n Platform.BINANCE: ['ETHBTC', 'BTCUSDT'],\n # Platform.OKEX: ['eth_btc', 'btc_usdt'],\n}\n\nclient = utils.create_rest_client(platform_id=TEST_PLATFORM)\nprint('\\n\\nTrade history\\n\\n')\nprint(client.fetch_trades_history(TEST_SYMBOLS[TEST_PLATFORM][0], limit=10))\nprint('\\n\\n---------------------')\nprint('\\n\\nCandles\\n\\n')\nprint(client.fetch_candles(TEST_SYMBOLS[TEST_PLATFORM][0], Interval.MIN_1, limit=10))\nprint('\\n\\n---------------------')\n\n# client = utils.create_ws_client(platform_id=TEST_PLATFORM)\n# client.on_data_item = lambda item: print(item) # print received parsed objects\n# client.subscribe(endpoints=[Endpoint.TRADE, Endpoint.CANDLE], symbols=TEST_SYMBOLS[TEST_PLATFORM],\n# interval=Interval.MIN_1)\n#\n# print('\\n\\nWebsocket data\\n\\n')\n# # Sleep to display incoming websocket items from separate thread\n# time.sleep(15)\n" }, { "alpha_fraction": 0.3527272641658783, "alphanum_fraction": 0.3527272641658783, "avg_line_length": 20.230770111083984, "blob_id": "9644f3c0e03f7170cc4c186dcaa51efd44d8f900", "content_id": "cca9de715c9134b71319ef4ddd454eea5a068c07", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "permissive", "max_line_length": 37, "num_lines": 13, "path": "/settings.py", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "# Django settings for testing clients\nCREDENTIALS_BY_PLATFORM = {\n \"BINANCE\": (\"rlvKnLAocc9R4HLU8q51elHte7WCqRwXAHuQ6p1ltKmMkQc2QCah8aSo9p3SDoOG\",\n \"\", # \"MvO6MAerLLi715qKGBRoKOSNAJXnAJpxfMHOU5uEAv6Yw5QyVJnFFQmjGTX7ZKNr\"\n ),\n \"BITFINEX\": (\"\",\n \"\"),\n \"BITMEX\": (\"\",\n \"\"),\n # (\"zt2U_wjwvPPbPE3T6nRTzVKr\",\n # \"3p-tCyGeFJc6-_RL-Q_hnn9NPowI-zTkhOtcZZipHihzG1Qy\"),\n}\nSECRET_KEY = \"ddd\"" }, { "alpha_fraction": 0.7130197882652283, "alphanum_fraction": 0.7130197882652283, "avg_line_length": 33.51764678955078, "blob_id": "c4931fc46abfea8c7df5f27e825ca7ba23e0d23e", "content_id": "4753fd7e3c618368a6403069a9e6bb209edc8376", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2934, "license_type": "permissive", "max_line_length": 100, "num_lines": 85, "path": "/hyperquant/clients/utils.py", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "from django.conf import 
settings\n\nfrom hyperquant.api import Platform\nfrom hyperquant.clients.binance import BinanceRESTClient, BinanceWSClient\nfrom hyperquant.clients.bitfinex import BitfinexRESTClient, BitfinexWSClient\nfrom hyperquant.clients.bitmex import BitMEXRESTClient, BitMEXWSClient\n\n# temp\n# if not settings.configured:\n# # todo add default credentials\n# print(\"settings.configure() for clients\")\n# settings.configure(base)\n\n_rest_client_class_by_platform_id = {\n Platform.BINANCE: BinanceRESTClient,\n Platform.BITFINEX: BitfinexRESTClient,\n Platform.BITMEX: BitMEXRESTClient,\n}\n\n_ws_client_class_by_platform_id = {\n Platform.BINANCE: BinanceWSClient,\n Platform.BITFINEX: BitfinexWSClient,\n Platform.BITMEX: BitMEXWSClient,\n}\n\n_rest_client_by_platform_id = {}\n_private_rest_client_by_platform_id = {}\n_ws_client_by_platform_id = {}\n_private_ws_client_by_platform_id = {}\n\n\ndef create_rest_client(platform_id, is_private=False, version=None):\n return _create_client(platform_id, True, is_private, version)\n\n\ndef get_or_create_rest_client(platform_id, is_private=False):\n return _get_or_create_client(platform_id, True, is_private)\n\n\ndef create_ws_client(platform_id, is_private=False, version=None):\n return _create_client(platform_id, False, is_private, version)\n\n\ndef get_or_create_ws_client(platform_id, is_private=False):\n return _get_or_create_client(platform_id, False, is_private)\n\n\ndef get_credentials_for(platform_id):\n platform_name = Platform.get_platform_name_by_id(platform_id)\n api_key, api_secret = settings.CREDENTIALS_BY_PLATFORM.get(platform_name)\n return api_key, api_secret\n\n\ndef _create_client(platform_id, is_rest, is_private=False, version=None):\n # Create\n class_lookup = _rest_client_class_by_platform_id if is_rest else _ws_client_class_by_platform_id\n client_class = class_lookup.get(platform_id)\n if is_private:\n api_key, api_secret = get_credentials_for(platform_id)\n client = client_class(api_key, api_secret, version)\n client.platform_id = platform_id # If not set in class\n else:\n client = client_class(version=version)\n client.platform_id = platform_id # If not set in class\n\n # For Binance's \"historicalTrades\" endpoint\n if platform_id == Platform.BINANCE:\n api_key, _ = get_credentials_for(platform_id)\n client.set_credentials(api_key, None)\n return client\n\n\ndef _get_or_create_client(platform_id, is_rest, is_private=False):\n # Get\n if is_rest:\n lookup = _private_rest_client_by_platform_id if is_private else _rest_client_by_platform_id\n else:\n lookup = _private_ws_client_by_platform_id if is_private else _ws_client_by_platform_id\n client = lookup.get(platform_id)\n if client:\n return client\n\n # Create\n lookup[platform_id] = client = _create_client(platform_id, is_rest, is_private)\n return client\n" }, { "alpha_fraction": 0.5753995180130005, "alphanum_fraction": 0.5850213766098022, "avg_line_length": 35.418033599853516, "blob_id": "9b97d05eeb01616b283833b5750204ff609565f3", "content_id": "5022f5ea0a2e0d75d96ccdc67863941f923a7cef", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17772, "license_type": "permissive", "max_line_length": 119, "num_lines": 488, "path": "/hyperquant/clients/binance.py", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "import hashlib\nimport hmac\nfrom operator import itemgetter\n\nfrom hyperquant.api import Platform, Sorting, Interval, Direction, OrderType\nfrom hyperquant.clients import WSClient, 
Endpoint, Trade, Error, ErrorCode, \\\n ParamName, WSConverter, RESTConverter, PrivatePlatformRESTClient, MyTrade, Candle, Ticker, OrderBookItem, Order, \\\n OrderBook, Account, Balance\n\n\n# REST\n\n# TODO check getting trades history from_id=1\nclass BinanceRESTConverterV1(RESTConverter):\n # Main params:\n base_url = \"https://api.binance.com/api/v{version}/\"\n\n # Settings:\n\n # Converting info:\n # For converting to platform\n endpoint_lookup = {\n Endpoint.PING: \"ping\",\n Endpoint.SERVER_TIME: \"time\",\n Endpoint.SYMBOLS: \"exchangeInfo\",\n Endpoint.TRADE: \"trades\",\n Endpoint.TRADE_HISTORY: \"historicalTrades\",\n Endpoint.TRADE_MY: \"myTrades\", # Private\n Endpoint.CANDLE: \"klines\",\n Endpoint.TICKER: \"ticker/price\",\n Endpoint.ORDER_BOOK: \"depth\",\n # Private\n Endpoint.ACCOUNT: \"account\",\n Endpoint.ORDER: \"order\",\n Endpoint.ORDER_CURRENT: \"openOrders\",\n Endpoint.ORDER_MY: \"allOrders\",\n }\n param_name_lookup = {\n ParamName.SYMBOL: \"symbol\",\n ParamName.LIMIT: \"limit\",\n ParamName.IS_USE_MAX_LIMIT: None,\n # ParamName.SORTING: None,\n ParamName.INTERVAL: \"interval\",\n ParamName.DIRECTION: \"side\",\n ParamName.ORDER_TYPE: \"type\",\n\n ParamName.TIMESTAMP: \"timestamp\",\n ParamName.FROM_ITEM: \"fromId\",\n ParamName.TO_ITEM: None,\n ParamName.FROM_TIME: \"startTime\",\n ParamName.TO_TIME: \"endTime\",\n\n ParamName.PRICE: \"price\",\n ParamName.AMOUNT: \"quantity\",\n # -ParamName.ASKS: \"asks\",\n # ParamName.BIDS: \"bids\",\n }\n param_value_lookup = {\n # Sorting.ASCENDING: None,\n # Sorting.DESCENDING: None,\n Sorting.DEFAULT_SORTING: Sorting.ASCENDING,\n\n Interval.MIN_1: \"1m\",\n Interval.MIN_3: \"3m\",\n Interval.MIN_5: \"5m\",\n Interval.MIN_15: \"15m\",\n Interval.MIN_30: \"30m\",\n Interval.HRS_1: \"1h\",\n Interval.HRS_2: \"2h\",\n Interval.HRS_4: \"4h\",\n Interval.HRS_6: \"6h\",\n Interval.HRS_8: \"8h\",\n Interval.HRS_12: \"12h\",\n Interval.DAY_1: \"1d\",\n Interval.DAY_3: \"3d\",\n Interval.WEEK_1: \"1w\",\n Interval.MONTH_1: \"1M\",\n\n # By properties:\n ParamName.DIRECTION: {\n Direction.SELL: \"SELL\",\n Direction.BUY: \"BUY\",\n },\n ParamName.ORDER_TYPE: {\n OrderType.LIMIT: \"LIMIT\",\n OrderType.MARKET: \"MARKET\",\n },\n # ParamName.ORDER_STATUS: {\n # OrderStatus.: \"\",\n # },\n }\n max_limit_by_endpoint = {\n Endpoint.TRADE: 1000,\n Endpoint.TRADE_HISTORY: 1000,\n Endpoint.ORDER_BOOK: 1000,\n Endpoint.CANDLE: 1000,\n }\n\n # For parsing\n\n param_lookup_by_class = {\n # Error\n Error: {\n \"code\": \"code\",\n \"msg\": \"message\",\n },\n # Data\n Trade: {\n \"time\": ParamName.TIMESTAMP,\n \"id\": ParamName.ITEM_ID,\n \"price\": ParamName.PRICE,\n \"qty\": ParamName.AMOUNT,\n # \"isBuyerMaker\": \"\",\n # \"isBestMatch\": \"\",\n },\n MyTrade: {\n \"symbol\": ParamName.SYMBOL,\n \"time\": ParamName.TIMESTAMP,\n \"id\": ParamName.ITEM_ID,\n \"price\": ParamName.PRICE,\n \"qty\": ParamName.AMOUNT,\n\n \"orderId\": ParamName.ORDER_ID,\n \"commission\": ParamName.FEE,\n # \"commissionAsset\": ParamName.FEE_SYMBOL,\n # \"\": ParamName.REBATE,\n },\n Candle: [\n ParamName.TIMESTAMP,\n ParamName.PRICE_OPEN,\n ParamName.PRICE_HIGH,\n ParamName.PRICE_LOW,\n ParamName.PRICE_CLOSE,\n None, # ParamName.AMOUNT, # only volume present\n None,\n None,\n ParamName.TRADES_COUNT,\n # ParamName.INTERVAL,\n ],\n Ticker: {\n \"symbol\": ParamName.SYMBOL,\n \"price\": ParamName.PRICE,\n },\n Account: {\n \"updateTime\": ParamName.TIMESTAMP,\n \"balances\": ParamName.BALANCES,\n },\n Balance: {\n \"asset\": ParamName.SYMBOL,\n \"free\": 
ParamName.AMOUNT_AVAILABLE,\n \"locked\": ParamName.AMOUNT_RESERVED,\n },\n Order: {\n \"symbol\": ParamName.SYMBOL,\n \"transactTime\": ParamName.TIMESTAMP,\n \"time\": ParamName.TIMESTAMP, # check \"time\" or \"updateTime\"\n \"updateTime\": ParamName.TIMESTAMP,\n \"orderId\": ParamName.ITEM_ID,\n \"clientOrderId\": ParamName.USER_ORDER_ID,\n\n \"type\": ParamName.ORDER_TYPE,\n \"price\": ParamName.PRICE,\n \"origQty\": ParamName.AMOUNT_ORIGINAL,\n \"executedQty\": ParamName.AMOUNT_EXECUTED,\n \"side\": ParamName.DIRECTION,\n \"status\": ParamName.ORDER_STATUS,\n },\n OrderBook: {\n \"lastUpdateId\": ParamName.ITEM_ID,\n \"bids\": ParamName.BIDS,\n \"asks\": ParamName.ASKS,\n },\n OrderBookItem: [ParamName.PRICE, ParamName.AMOUNT],\n }\n\n error_code_by_platform_error_code = {\n -2014: ErrorCode.UNAUTHORIZED,\n -1121: ErrorCode.WRONG_SYMBOL,\n -1100: ErrorCode.WRONG_PARAM,\n }\n error_code_by_http_status = {\n 429: ErrorCode.RATE_LIMIT,\n 418: ErrorCode.IP_BAN,\n }\n\n # For converting time\n is_source_in_milliseconds = True\n\n # timestamp_platform_names = [ParamName.TIMESTAMP]\n\n def _process_param_value(self, name, value):\n if name == ParamName.FROM_ITEM or name == ParamName.TO_ITEM:\n if isinstance(value, Trade): # ItemObject):\n return value.item_id\n return super()._process_param_value(name, value)\n\n def parse(self, endpoint, data):\n if endpoint == Endpoint.SERVER_TIME and data:\n timestamp_ms = data.get(\"serverTime\")\n return timestamp_ms / 1000 if not self.use_milliseconds and timestamp_ms else timestamp_ms\n if endpoint == Endpoint.SYMBOLS and data and ParamName.SYMBOLS in data:\n exchange_info = data[ParamName.SYMBOLS]\n # (There are only 2 statuses: \"TRADING\" and \"BREAK\")\n # symbols = [item[ParamName.SYMBOL] for item in exchange_info if item[\"status\"] == \"TRADING\"]\n symbols = [item[ParamName.SYMBOL] for item in exchange_info]\n return symbols\n\n result = super().parse(endpoint, data)\n return result\n\n # def preprocess_params(self, endpoint, params):\n # if endpoint in self.secured_endpoints:\n # params[ParamName.TIMESTAMP] = int(time.time() * 1000)\n #\n # return super().preprocess_params(endpoint, params)\n\n def _generate_and_add_signature(self, platform_params, api_key, api_secret):\n if not api_key or not api_secret:\n self.logger.error(\"Empty api_key or api_secret. 
Cannot generate signature.\")\n return None\n ordered_params_list = self._order_params(platform_params)\n # print(\"ordered_platform_params:\", ordered_params_list)\n query_string = \"&\".join([\"{}={}\".format(d[0], d[1]) for d in ordered_params_list])\n # print(\"query_string:\", query_string)\n m = hmac.new(api_secret.encode(\"utf-8\"), query_string.encode(\"utf-8\"), hashlib.sha256)\n signature = m.hexdigest()\n # Add\n # platform_params[\"signature\"] = signature # no need\n # if ordered_params_list and ordered_params_list[-1][0] != \"signature\":\n ordered_params_list.append((\"signature\", signature))\n return ordered_params_list\n\n def _order_params(self, platform_params):\n # Convert params to sorted list with signature as last element.\n\n params_list = [(key, value) for key, value in platform_params.items() if key != \"signature\"]\n # Sort parameters by key\n params_list.sort(key=itemgetter(0))\n # Append signature to the end if present\n if \"signature\" in platform_params:\n params_list.append((\"signature\", platform_params[\"signature\"]))\n return params_list\n\n\nclass BinanceRESTClient(PrivatePlatformRESTClient):\n # Settings:\n platform_id = Platform.BINANCE\n version = \"1\" # Default version\n\n _converter_class_by_version = {\n \"1\": BinanceRESTConverterV1,\n \"3\": BinanceRESTConverterV1, # Only for some methods (same converter used)\n }\n\n # State:\n ratelimit_error_in_row_count = 0\n\n @property\n def headers(self):\n result = super().headers\n result[\"X-MBX-APIKEY\"] = self._api_key\n result[\"Content-Type\"] = \"application/x-www-form-urlencoded\"\n return result\n\n def _on_response(self, response, result):\n # super()._on_response(response, result)\n\n self.delay_before_next_request_sec = 0\n if isinstance(result, Error):\n if result.code == ErrorCode.RATE_LIMIT:\n self.ratelimit_error_in_row_count += 1\n self.delay_before_next_request_sec = 60 * 2 * self.ratelimit_error_in_row_count # some number - change\n elif result.code == ErrorCode.IP_BAN:\n self.ratelimit_error_in_row_count += 1\n self.delay_before_next_request_sec = 60 * 5 * self.ratelimit_error_in_row_count # some number - change\n else:\n self.ratelimit_error_in_row_count = 0\n else:\n self.ratelimit_error_in_row_count = 0\n\n def fetch_history(self, endpoint, symbol, limit=None, from_item=None, to_item=None, sorting=None,\n is_use_max_limit=False, from_time=None, to_time=None,\n version=None, **kwargs):\n if from_item is None:\n from_item = 0\n return super().fetch_history(endpoint, symbol, limit, from_item, to_item, sorting, is_use_max_limit, from_time,\n to_time, **kwargs)\n\n def fetch_order_book(self, symbol=None, limit=None, is_use_max_limit=False, version=None, **kwargs):\n LIMIT_VALUES = [5, 10, 20, 50, 100, 500, 1000]\n if limit not in LIMIT_VALUES:\n self.logger.error(\"Limit value %s not in %s\", limit, LIMIT_VALUES)\n return super().fetch_order_book(symbol, limit, is_use_max_limit, **kwargs)\n\n def fetch_tickers(self, symbols=None, version=None, **kwargs):\n items = super().fetch_tickers(symbols, version or \"3\", **kwargs)\n\n # (Binance returns timestamp only for /api/v1/ticker/24hr which has weight of 40.\n # /api/v3/ticker/price - has weight 2.)\n timestamp = self.get_server_timestamp(version)\n for item in items:\n item.timestamp = timestamp\n item.use_milliseconds = self.use_milliseconds\n\n return items\n\n def fetch_account_info(self, version=None, **kwargs):\n return super().fetch_account_info(version or \"3\", **kwargs)\n\n def create_order(self, symbol, order_type, 
direction, price=None, amount=None, is_test=False, version=None,\n **kwargs):\n if order_type == OrderType.LIMIT:\n # (About values:\n # https://www.reddit.com/r/BinanceExchange/comments/8odvs4/question_about_time_in_force_binance_api/)\n kwargs[\"timeInForce\"] = \"GTC\"\n return super().create_order(symbol, order_type, direction, price, amount, is_test, version, **kwargs)\n\n def cancel_order(self, order, symbol=None, version=None, **kwargs):\n if hasattr(order, ParamName.SYMBOL) and order.symbol:\n symbol = order.symbol\n return super().cancel_order(order, symbol, version, **kwargs)\n\n def check_order(self, order, symbol=None, version=None, **kwargs):\n if hasattr(order, ParamName.SYMBOL) and order.symbol:\n symbol = order.symbol\n return super().check_order(order, symbol, version, **kwargs)\n\n # def fetch_orders(self, symbol=None, limit=None, from_item=None, is_open=False, version=None, **kwargs):\n # return super().fetch_orders(symbol, limit, from_item, is_open, version, **kwargs)\n\n def _send(self, method, endpoint, params=None, version=None, **kwargs):\n if endpoint in self.converter.secured_endpoints:\n server_timestamp = self.get_server_timestamp()\n params[ParamName.TIMESTAMP] = server_timestamp if self.use_milliseconds else int(server_timestamp * 1000)\n return super()._send(method, endpoint, params, version, **kwargs)\n\n\n# WebSocket\n\nclass BinanceWSConverterV1(WSConverter):\n # Main params:\n base_url = \"wss://stream.binance.com:9443/\"\n\n IS_SUBSCRIPTION_COMMAND_SUPPORTED = False\n\n # supported_endpoints = [Endpoint.TRADE]\n # symbol_endpoints = [Endpoint.TRADE]\n # supported_symbols = None\n\n # Settings:\n\n # Converting info:\n # For converting to platform\n\n endpoint_lookup = {\n Endpoint.TRADE: \"{symbol}@trade\",\n Endpoint.CANDLE: \"{symbol}@kline_{interval}\",\n Endpoint.TICKER: \"{symbol}@miniTicker\",\n Endpoint.TICKER_ALL: \"!miniTicker@arr\",\n Endpoint.ORDER_BOOK: \"{symbol}@depth{level}\",\n Endpoint.ORDER_BOOK_DIFF: \"{symbol}@depth\",\n }\n\n # For parsing\n param_lookup_by_class = {\n # Error\n Error: {\n # \"code\": \"code\",\n # \"msg\": \"message\",\n },\n # Data\n Trade: {\n \"s\": ParamName.SYMBOL,\n \"T\": ParamName.TIMESTAMP,\n \"t\": ParamName.ITEM_ID,\n \"p\": ParamName.PRICE,\n \"q\": ParamName.AMOUNT,\n # \"m\": \"\",\n },\n Candle: {\n \"s\": ParamName.SYMBOL,\n \"t\": ParamName.TIMESTAMP,\n \"i\": ParamName.INTERVAL,\n\n \"o\": ParamName.PRICE_OPEN,\n \"c\": ParamName.PRICE_CLOSE,\n \"h\": ParamName.PRICE_HIGH,\n \"l\": ParamName.PRICE_LOW,\n \"\": ParamName.AMOUNT, # only volume present\n \"n\": ParamName.TRADES_COUNT,\n },\n Ticker: {\n \"s\": ParamName.SYMBOL,\n \"E\": ParamName.TIMESTAMP,\n \"c\": ParamName.PRICE, # todo check to know for sure\n },\n OrderBook: {\n # Partial Book Depth Streams\n \"lastUpdateId\": ParamName.ITEM_ID,\n \"asks\": ParamName.ASKS,\n \"bids\": ParamName.BIDS,\n # Diff. 
Depth Stream\n \"s\": ParamName.SYMBOL,\n \"E\": ParamName.TIMESTAMP,\n \"u\": ParamName.ITEM_ID,\n \"b\": ParamName.BIDS,\n \"a\": ParamName.ASKS,\n },\n OrderBookItem: [ParamName.PRICE, ParamName.AMOUNT],\n }\n event_type_param = \"e\"\n endpoint_by_event_type = {\n \"trade\": Endpoint.TRADE,\n \"kline\": Endpoint.CANDLE,\n \"24hrMiniTicker\": Endpoint.TICKER,\n \"24hrTicker\": Endpoint.TICKER,\n \"depthUpdate\": Endpoint.ORDER_BOOK,\n # \"depthUpdate\": Endpoint.ORDER_BOOK_DIFF,\n }\n\n # https://github.com/binance-exchange/binance-official-api-docs/blob/master/errors.md\n error_code_by_platform_error_code = {\n # -2014: ErrorCode.UNAUTHORIZED,\n # -1121: ErrorCode.WRONG_SYMBOL,\n # -1100: ErrorCode.WRONG_PARAM,\n }\n error_code_by_http_status = {}\n\n # For converting time\n is_source_in_milliseconds = True\n\n def _generate_subscription(self, endpoint, symbol=None, **params):\n return super()._generate_subscription(endpoint, symbol.lower() if symbol else symbol, **params)\n\n def parse(self, endpoint, data):\n if \"data\" in data:\n # stream = data[\"stream\"] # no need\n data = data[\"data\"]\n return super().parse(endpoint, data)\n\n def _parse_item(self, endpoint, item_data):\n if endpoint == Endpoint.CANDLE and \"k\" in item_data:\n item_data = item_data[\"k\"]\n return super()._parse_item(endpoint, item_data)\n\n\nclass BinanceWSClient(WSClient):\n platform_id = Platform.BINANCE\n version = \"1\" # Default version\n\n _converter_class_by_version = {\n \"1\": BinanceWSConverterV1,\n }\n\n @property\n def url(self):\n # Generate subscriptions\n if not self.current_subscriptions:\n self.logger.warning(\"Making URL while current_subscriptions are empty. \"\n \"There is no sense to connect without subscriptions.\")\n subscriptions = \"\"\n # # There is no sense to connect without subscriptions\n # return None\n elif len(self.current_subscriptions) > 1:\n subscriptions = \"stream?streams=\" + \"/\".join(self.current_subscriptions)\n else:\n subscriptions = \"ws/\" + \"\".join(self.current_subscriptions)\n\n self.is_subscribed_with_url = True\n return super().url + subscriptions\n\n def subscribe(self, endpoints=None, symbols=None, **params):\n self._check_params(endpoints, symbols, **params)\n\n super().subscribe(endpoints, symbols, **params)\n\n def unsubscribe(self, endpoints=None, symbols=None, **params):\n self._check_params(endpoints, symbols, **params)\n\n super().unsubscribe(endpoints, symbols, **params)\n\n def _check_params(self, endpoints=None, symbols=None, **params):\n LEVELS_AVAILABLE = [5, 10, 20]\n if endpoints and Endpoint.ORDER_BOOK in endpoints and ParamName.LEVEL in params and \\\n params.get(ParamName.LEVEL) not in LEVELS_AVAILABLE:\n self.logger.error(\"For %s endpoint %s param must be of values: %s, but set: %s\",\n Endpoint.ORDER_BOOK, ParamName.LEVEL, LEVELS_AVAILABLE,\n params.get(ParamName.LEVEL))\n" }, { "alpha_fraction": 0.7183098793029785, "alphanum_fraction": 0.7183098793029785, "avg_line_length": 13.199999809265137, "blob_id": "2d1a0b2f4bb4568f09d7722f48b972425fb08f08", "content_id": "032c6f9977bd7aa32e35213700c4e8b6e5ae5ad7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 142, "license_type": "permissive", "max_line_length": 48, "num_lines": 10, "path": "/README.md", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "# hqlib\nCommon library for HyperQuant projects on Python\n\n## Install\n\n pipenv install\n\n## Run demo code\n\n pipenv run python 
run_demo.py\n" }, { "alpha_fraction": 0.5443465709686279, "alphanum_fraction": 0.5632166266441345, "avg_line_length": 36.148094177246094, "blob_id": "69118a3b10299c40f5fd82f4ef57afb7b8f32e17", "content_id": "60b7e4f8d291e8b9ffd15bf75c6e6d7b46a7acd7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26428, "license_type": "permissive", "max_line_length": 110, "num_lines": 709, "path": "/hyperquant/clients/bitfinex.py", "repo_name": "assassinen/hyperquant", "src_encoding": "UTF-8", "text": "import hashlib\nimport hmac\nimport time\n\nfrom hyperquant.api import Platform, Sorting, Direction\nfrom hyperquant.clients import Endpoint, WSClient, Trade, ParamName, Error, \\\n ErrorCode, Channel, \\\n Info, WSConverter, RESTConverter, PlatformRESTClient, PrivatePlatformRESTClient\n\n\n# https://docs.bitfinex.com/v1/docs\n# https://docs.bitfinex.com/v2/docs\n\n# REST\n\nclass BitfinexRESTConverterV1(RESTConverter):\n # Main params:\n base_url = \"https://api.bitfinex.com/v{version}/\"\n\n IS_SORTING_ENABLED = False\n\n # Settings:\n\n # Converting info:\n # For converting to platform\n endpoint_lookup = {\n Endpoint.TRADE: \"trades/{symbol}\",\n Endpoint.TRADE_HISTORY: \"trades/{symbol}\", # same, not implemented for this version\n }\n param_name_lookup = {\n ParamName.LIMIT: \"limit_trades\",\n ParamName.IS_USE_MAX_LIMIT: None,\n ParamName.SORTING: None, # not supported\n ParamName.FROM_ITEM: \"timestamp\",\n ParamName.TO_ITEM: \"timestamp\", # ?\n ParamName.FROM_TIME: \"timestamp\",\n ParamName.TO_TIME: None, # ?\n }\n param_value_lookup = {\n # Sorting.ASCENDING: None,\n # Sorting.DESCENDING: None,\n Sorting.DEFAULT_SORTING: Sorting.DESCENDING,\n }\n max_limit_by_endpoint = {\n Endpoint.TRADE: 1000,\n Endpoint.TRADE_HISTORY: 1000, # same, not implemented for this version\n }\n\n # For parsing\n\n param_lookup_by_class = {\n Error: {\n \"message\": \"code\",\n # \"error\": \"code\",\n # \"message\": \"message\",\n },\n Trade: {\n \"tid\": ParamName.ITEM_ID,\n \"timestamp\": ParamName.TIMESTAMP,\n \"price\": ParamName.PRICE,\n \"amount\": ParamName.AMOUNT,\n \"type\": ParamName.DIRECTION,\n },\n }\n\n error_code_by_platform_error_code = {\n # \"\": ErrorCode.UNAUTHORIZED,\n \"Unknown symbol\": ErrorCode.WRONG_SYMBOL,\n # \"ERR_RATE_LIMIT\": ErrorCode.RATE_LIMIT,\n }\n error_code_by_http_status = {\n 429: ErrorCode.RATE_LIMIT,\n }\n\n # For converting time\n # is_source_in_milliseconds = True\n timestamp_platform_names = [ParamName.TIMESTAMP]\n\n def prepare_params(self, endpoint=None, params=None):\n resources, platform_params = super().prepare_params(endpoint, params)\n\n # (SYMBOL was used in URL path) (not necessary)\n if platform_params and ParamName.SYMBOL in platform_params:\n del platform_params[ParamName.SYMBOL]\n return resources, platform_params\n\n def parse(self, endpoint, data):\n if data and endpoint == Endpoint.SYMBOLS:\n return [item.upper() for item in data]\n return super().parse(endpoint, data)\n\n def _parse_item(self, endpoint, item_data):\n result = super()._parse_item(endpoint, item_data)\n\n # Convert Trade.direction\n if result and isinstance(result, Trade) and result.direction:\n # (Can be of \"sell\"|\"buy|\"\")\n result.direction = Direction.SELL if result.direction == \"sell\" else \\\n (Direction.BUY if result.direction == \"buy\" else None)\n\n return result\n\n\nclass BitfinexRESTConverterV2(RESTConverter):\n # Main params:\n base_url = \"https://api.bitfinex.com/v{version}/\"\n IS_SORTING_ENABLED 
= True\n\n # Settings:\n\n # Converting info:\n # For converting to platform\n endpoint_lookup = {\n Endpoint.TRADE: \"trades/t{symbol}/hist\", # same, not implemented for this version\n Endpoint.TRADE_HISTORY: \"trades/t{symbol}/hist\",\n }\n param_name_lookup = {\n ParamName.LIMIT: \"limit\",\n ParamName.IS_USE_MAX_LIMIT: None,\n ParamName.SORTING: \"sort\",\n ParamName.FROM_ITEM: \"start\",\n ParamName.TO_ITEM: \"end\",\n ParamName.FROM_TIME: \"start\",\n ParamName.TO_TIME: \"end\",\n }\n param_value_lookup = {\n Sorting.ASCENDING: 1,\n Sorting.DESCENDING: 0,\n Sorting.DEFAULT_SORTING: Sorting.DESCENDING,\n }\n max_limit_by_endpoint = {\n Endpoint.TRADE: 1000, # same, not implemented for this version\n Endpoint.TRADE_HISTORY: 1000,\n }\n\n # For parsing\n param_lookup_by_class = {\n # [\"error\",10020,\"limit: invalid\"]\n Error: [\"\", \"code\", \"message\"],\n # on trading pairs (ex. tBTCUSD) [ID, MTS, AMOUNT, PRICE]\n # [305430435,1539757383787,-0.086154,6760.7]\n # (on funding currencies (ex. fUSD) [ID, MTS, AMOUNT, RATE, PERIOD]) - not used now\n Trade: [ParamName.ITEM_ID, ParamName.TIMESTAMP, ParamName.AMOUNT, ParamName.PRICE],\n }\n\n error_code_by_platform_error_code = {\n # \"\": ErrorCode.UNAUTHORIZED,\n 10020: ErrorCode.WRONG_LIMIT,\n 11010: ErrorCode.RATE_LIMIT,\n }\n error_code_by_http_status = {}\n\n # For converting time\n is_source_in_milliseconds = True\n timestamp_platform_names = [\"start\", \"end\"]\n\n def prepare_params(self, endpoint=None, params=None):\n # # Symbol needs \"t\" prefix for trading pair\n # if ParamName.SYMBOL in params:\n # params[ParamName.SYMBOL] = \"t\" + str(params[ParamName.SYMBOL])\n\n resources, platform_params = super().prepare_params(endpoint, params)\n\n # (SYMBOL was used in URL path) (not necessary)\n if platform_params and ParamName.SYMBOL in platform_params:\n del platform_params[ParamName.SYMBOL]\n return resources, platform_params\n\n def _process_param_value(self, name, value):\n # # Symbol needs \"t\" prefix for trading pair\n # if name == ParamName.SYMBOL and value:\n # return \"t\" + value\n # elif\n if name == ParamName.FROM_ITEM or name == ParamName.TO_ITEM:\n if isinstance(value, Trade):\n return value.timestamp\n\n return super()._process_param_value(name, value)\n\n def _parse_item(self, endpoint, item_data):\n result = super()._parse_item(endpoint, item_data)\n\n if result and isinstance(result, Trade):\n # Determine direction\n result.direction = Direction.BUY if result.amount > 0 else Direction.SELL\n # Stringify and check sign\n result.price = str(result.price)\n result.amount = str(result.amount) if result.amount > 0 else str(-result.amount)\n return result\n\n def parse_error(self, error_data=None, response=None):\n result = super().parse_error(error_data, response)\n\n if error_data and isinstance(error_data, dict) and \"error\" in error_data:\n if error_data[\"error\"] == \"ERR_RATE_LIMIT\":\n result.error_code = ErrorCode.RATE_LIMIT\n result.message = ErrorCode.get_message_by_code(result.code) + result.message\n return result\n\n\nclass BitfinexRESTClient(PrivatePlatformRESTClient):\n platform_id = Platform.BITFINEX\n version = \"2\" # Default version\n _converter_class_by_version = {\n \"1\": BitfinexRESTConverterV1,\n \"2\": BitfinexRESTConverterV2,\n }\n\n def get_symbols(self, version=None):\n self.logger.info(\"Note: Bitfinex supports get_symbols only in v1.\")\n return super().get_symbols(version=\"1\")\n\n # # after_timestamp param can be added for v1, and after_timestamp, before_timestamp for v2\n # def 
fetch_trades(self, symbol, limit=None, **kwargs):\n # return super().fetch_trades(symbol, limit, **kwargs)\n\n # v1: Same as fetch_trades(), but result can be only reduced, but not extended\n def fetch_trades_history(self, symbol, limit=None, from_item=None,\n sorting=None, from_time=None, to_time=None, **kwargs):\n if from_item and self.version == \"1\":\n # todo check\n self.logger.warning(\"Bitfinex v1 API has no trades-history functionality.\")\n return None\n # return self.fetch_trades(symbol, limit, **kwargs)\n return super().fetch_trades_history(symbol, limit, from_item, sorting=sorting,\n from_time=from_time, to_time=to_time, **kwargs)\n\n def _on_response(self, response, result):\n # super()._on_response(response)\n\n if not response.ok and \"Retry-After\" in response.headers:\n self.delay_before_next_request_sec = int(response.headers[\"Retry-After\"])\n elif isinstance(result, Error):\n if result.code == ErrorCode.RATE_LIMIT:\n # Bitfinex API access is rate limited. The rate limit applies if an\n # IP address exceeds a certain number of requests per minute. The current\n # limit is between 10 and 45 to a specific REST API endpoint (ie. /ticker).\n # In case a client reaches the limit, we block the requesting IP address\n # for 10-60 seconds on that endpoint. The API will return the JSON response\n # {\"error\": \"ERR_RATE_LIMIT\"}. These DDoS defenses may change over time to\n # further improve reliability.\n self.delay_before_next_request_sec = 60\n else:\n self.delay_before_next_request_sec = 10\n\n\n# WebSocket\n\n\nclass BitfinexWSConverterV2(WSConverter):\n # Main params:\n base_url = \"wss://api.bitfinex.com/ws/{version}/\"\n\n IS_SUBSCRIPTION_COMMAND_SUPPORTED = True\n\n # supported_endpoints = [Endpoint.TRADE]\n # symbol_endpoints = [Endpoint.TRADE]\n # supported_symbols = None\n\n # Settings:\n\n # Converting info:\n # For converting to platform\n endpoint_lookup = {\n Endpoint.TRADE: \"trades\",\n }\n\n # For parsing\n item_class_by_endpoint = dict(**WSConverter.item_class_by_endpoint, **{\n # Item class by event type\n \"error\": Error,\n \"info\": Info,\n \"subscribed\": Channel,\n })\n param_lookup_by_class = {\n Error: {\n \"code\": \"code\",\n \"msg\": \"message\",\n },\n Info: {\n \"code\": \"code\",\n \"msg\": \"message\",\n },\n Channel: {\n \"chanId\": \"channel_id\",\n \"channel\": \"channel\",\n \"pair\": ParamName.SYMBOL,\n },\n #\n Trade: [ParamName.ITEM_ID, ParamName.TIMESTAMP, ParamName.AMOUNT, ParamName.PRICE],\n }\n\n # https://docs.bitfinex.com/v2/docs/abbreviations-glossary\n # 10300 : Subscription failed (generic)\n # 10301 : Already subscribed\n # 10302 : Unknown channel\n # 10400 : Unsubscription failed (generic)\n # 10401 : Not subscribed\n # errors = {10000: 'Unknown event',\n # 10001: 'Generic error',\n # 10008: 'Concurrency error',\n # 10020: 'Request parameters error',\n # 10050: 'Configuration setup failed',\n # 10100: 'Failed authentication',\n # 10111: 'Error in authentication request payload',\n # 10112: 'Error in authentication request signature',\n # 10113: 'Error in authentication request encryption',\n # 10114: 'Error in authentication request nonce',\n # 10200: 'Error in un-authentication request',\n # 10300: 'Subscription Failed (generic)',\n # 10301: 'Already Subscribed',\n # 10302: 'Unknown channel',\n # 10400: 'Subscription Failed (generic)',\n # 10401: 'Not subscribed',\n # 11000: 'Not ready, try again later',\n # 20000: 'User is invalid!',\n # 20051: 'Websocket server stopping',\n # 20060: 'Websocket server resyncing',\n # 
20061: 'Websocket server resync complete'\n    #           }\n    error_code_by_platform_error_code = {\n        # 10000: ErrorCode.WRONG_EVENT,\n        10001: ErrorCode.WRONG_SYMBOL,\n        # 10305: ErrorCode.CHANNEL_LIMIT,\n    }\n    event_type_param = \"event\"\n\n    # For converting time\n    is_source_in_milliseconds = True\n\n    def __init__(self, platform_id=None, version=None):\n        self.channel_by_id = {}\n        super().__init__(platform_id, version)\n\n    def _generate_subscription(self, endpoint, symbol=None, **params):\n        channel = super()._generate_subscription(endpoint, symbol, **params)\n        return (channel, symbol)\n\n    def parse(self, endpoint, data):\n        # if data:\n        #     endpoint = data.get(self.event_type_param)\n        # if \"data\" in data:\n        #     data = data[\"data\"]\n        if isinstance(data, list):\n            # [284792,[[306971149,1540470353199,-0.76744631,0.031213],...] (1)\n            # todo add tests\n            # or [102165,\"te\",[306995378,1540485961266,-0.216139,0.031165]]\n            # or [102165,\"tu\",[306995378,1540485961266,-0.216139,0.031165]] (2)\n            channel_id = data[0]\n            channel = self.channel_by_id.get(channel_id)\n            if channel:\n                # Get endpoint by channel\n                endpoint = None\n                for k, v in self.endpoint_lookup.items():\n                    if v == channel.channel:\n                        endpoint = k\n\n                # Parse\n                if data[1] == \"tu\":\n                    # Skip \"tu\" as the item has already been added as \"te\"\n                    return None\n                # if data[1] == \"te\":\n                #     # Skip \"te\" as an item has no id yet, waiting for \"tu\" (actually there is an id already)\n                #     return None\n\n                # (data[1] - for v1, data[1] or [data[2]] - for v2, see above (1) and (2) examples)\n                real_data = data[1] if isinstance(data[1], list) else [data[2]]\n\n                result = super().parse(endpoint, real_data)\n\n                # Set symbol\n                for item in result:\n                    if hasattr(item, ParamName.SYMBOL):\n                        item.symbol = channel.symbol\n                return result\n\n        return super().parse(endpoint, data)\n\n    def _parse_item(self, endpoint, item_data):\n        result = super()._parse_item(endpoint, item_data)\n\n        if isinstance(result, Channel):\n            self.channel_by_id[result.channel_id] = result\n        elif result and isinstance(result, Trade):\n            if result.symbol and result.symbol.startswith(\".\"):\n                return None\n\n            if not result.item_id:\n                result.item_id = \"%s_%s_%s\" % (result.timestamp, result.price, result.amount)\n            # Determine direction\n            result.direction = Direction.BUY if result.amount > 0 else Direction.SELL\n            # Stringify and check sign\n            result.price = str(result.price)\n            result.amount = str(result.amount) if result.amount > 0 else str(-result.amount)\n        return result\n\n\n# (not necessary)\nclass BitfinexWSConverterV1(BitfinexWSConverterV2):\n    # Main params:\n    base_url = \"wss://api.bitfinex.com/ws/{version}/\"\n\n    # # Settings:\n    #\n    # # Converting info:\n    # # For converting to platform\n    # endpoint_lookup = {\n    #     Endpoint.TRADE: \"trades\",\n    # }\n\n    # For parsing\n    param_lookup_by_class = {\n        Error: {\n            \"code\": \"code\",\n            \"msg\": \"message\",\n        },\n        Info: {\n            \"code\": \"code\",\n            \"msg\": \"message\",\n        },\n        Channel: {\n            \"channel\": \"channel\",\n            \"chanId\": \"channel_id\",\n            \"pair\": ParamName.SYMBOL,\n        },\n        # [ 5, \"te\", \"1234-BTCUSD\", 1443659698, 236.42, 0.49064538 ]\n        # Trade: [\"\", \"\", ParamName.ITEM_ID, ParamName.TIMESTAMP, ParamName.PRICE, ParamName.AMOUNT],\n        Trade: [ParamName.ITEM_ID, ParamName.TIMESTAMP, ParamName.PRICE, ParamName.AMOUNT],\n    }\n\n    # # 10300 : Subscription failed (generic)\n    # # 10301 : Already subscribed\n    # # 10302 : Unknown channel\n    # # 10400 : Unsubscription failed (generic)\n    # # 10401 : Not subscribed\n    # error_code_by_platform_error_code = {\n    #     # 10000: 
ErrorCode.WRONG_EVENT,\n    #     # 10001: ErrorCode.WRONG_SYMBOL,\n    # }\n    #\n    # # For converting time\n    # # is_source_in_milliseconds = True\n\n    # def parse_item(self, endpoint, item_data):\n    #     result = super().parse_item(endpoint, item_data)\n    #\n    #     # Convert Channel.symbol \"tXXXYYY\" -> \"XXXYYY\"\n    #     if result and isinstance(result, Channel) and result.symbol:\n    #         if result.symbol[0] == \"t\":\n    #             result.symbol = result.symbol[1:]\n    #\n    #     return result\n\n\nclass BitfinexWSClient(WSClient):\n    # TODO consider reconnection and resubscription\n    # TODO consider reconnect on connection, pong and other timeouts\n\n    # Settings:\n    platform_id = Platform.BITFINEX\n    version = \"2\" # Default version\n\n    _converter_class_by_version = {\n        \"1\": BitfinexWSConverterV1,\n        \"2\": BitfinexWSConverterV2,\n    }\n\n    # State:\n\n    def _send_subscribe(self, subscriptions):\n        for channel, symbol in subscriptions:\n            trading_pair_symbol = \"t\" + symbol\n            event_data = {\n                \"event\": \"subscribe\",\n                \"channel\": channel,\n                \"symbol\": trading_pair_symbol}\n            self._send(event_data)\n\n    def _parse(self, endpoint, data):\n        if isinstance(data, list) and len(data) > 1 and data[1] == \"hb\":\n            # Heartbeat. skip for now...\n            return None\n        return super()._parse(endpoint, data)\n\n    # The commented-out methods below can be deleted freely if it is easier to port the code from the other library again from scratch\n\n    # def on_item_received(self, item):\n    #     # if isinstance(item, Channel):\n    #     #     self.channel_by_id[item.channel_id] = item\n    #     #     return\n    #     #\n    #     super().on_item_received(item)\n    #\n    #     # # Handle data\n    #     # if isinstance(data, dict):\n    #     #     # This is a system message\n    #     #     self._system_handler(data, received_at)\n    #     # else:\n    #     #     # This is a list of data\n    #     #     if data[1] == 'hb':\n    #     #         self._heartbeat_handler()\n    #     #     else:\n    #     #         self._data_handler(data, received_at)\n\n    # def _system_handler(self, data, ts):\n    #     \"\"\"Distributes system messages to the appropriate handler.\n    #     System messages include everything that arrives as a dict,\n    #     or a list containing a heartbeat.\n    #     :param data:\n    #     :param ts:\n    #     :return:\n    #     \"\"\"\n    #     self.log.debug(\"_system_handler(): Received a system message: %s\", data)\n    #     # Unpack the data\n    #     event = data.pop('event')\n    #     if event == 'pong':\n    #         self.log.debug(\"_system_handler(): Distributing %s to _pong_handler..\",\n    #                       data)\n    #         self._pong_handler()\n    #     elif event == 'info':\n    #         self.log.debug(\"_system_handler(): Distributing %s to _info_handler..\",\n    #                       data)\n    #         self._info_handler(data)\n    #     elif event == 'error':\n    #         self.log.debug(\"_system_handler(): Distributing %s to _error_handler..\",\n    #                       data)\n    #         self._error_handler(data)\n    #     elif event in ('subscribed', 'unsubscribed', 'conf', 'auth', 'unauth'):\n    #         self.log.debug(\"_system_handler(): Distributing %s to \"\n    #                       \"_response_handler..\", data)\n    #         self._response_handler(event, data, ts)\n    #     else:\n    #         self.log.error(\"Unhandled event: %s, data: %s\", event, data)\n\n    # if event_name in ('subscribed', 'unsubscribed', 'conf', 'auth', 'unauth'):\n    #     try:\n    #         self._response_handlers[event_name](event_name, data, ts)\n    #     except KeyError:\n    #         self.log.error(\"Dtype '%s' does not have a response \"\n    #                        \"handler! 
(%s)\", event_name, message)\n # elif event_name == 'data':\n # try:\n # channel_id = data[0]\n # if channel_id != 0:\n # # Get channel type associated with this data to the\n # # associated data type (from 'data' to\n # # 'book', 'ticker' or similar\n # channel_type, *_ = self.channel_directory[channel_id]\n #\n # # Run the associated data handler for this channel type.\n # self._data_handlers[channel_type](channel_type, data, ts)\n # # Update time stamps.\n # self.update_timestamps(channel_id, ts)\n # else:\n # # This is data from auth channel, call handler\n # self._handle_account(data=data, ts=ts)\n # except KeyError:\n # self.log.error(\"Channel ID does not have a data handler! %s\",\n # message)\n # else:\n # self.log.error(\"Unknown event_name on queue! %s\", message)\n # continue\n\n # self._response_handlers = {'unsubscribed': self._handle_unsubscribed,\n # 'subscribed': self._handle_subscribed,\n # 'conf': self._handle_conf,\n # 'auth': self._handle_auth,\n # 'unauth': self._handle_auth}\n # self._data_handlers = {'ticker': self._handle_ticker,\n # 'book': self._handle_book,\n # 'raw_book': self._handle_raw_book,\n # 'candles': self._handle_candles,\n # 'trades': self._handle_trades}\n\n # https://github.com/Crypto-toolbox/btfxwss/blob/master/btfxwss/queue_processor.py\n\n # def _handle_subscribed(self, dtype, data, ts,):\n # \"\"\"Handles responses to subscribe() commands.\n # Registers a channel id with the client and assigns a data handler to it.\n # :param dtype:\n # :param data:\n # :param ts:\n # :return:\n # \"\"\"\n # self.log.debug(\"_handle_subscribed: %s - %s - %s\", dtype, data, ts)\n # channel_name = data.pop('channel')\n # channel_id = data.pop('chanId')\n # config = data\n #\n # if 'pair' in config:\n # symbol = config['pair']\n # if symbol.startswith('t'):\n # symbol = symbol[1:]\n # elif 'symbol' in config:\n # symbol = config['symbol']\n # if symbol.startswith('t'):\n # symbol = symbol[1:]\n # elif 'key' in config:\n # symbol = config['key'].split(':')[2][1:] #layout type:interval:tPair\n # else:\n # symbol = None\n #\n # if 'prec' in config and config['prec'].startswith('R'):\n # channel_name = 'raw_' + channel_name\n #\n # self.channel_handlers[channel_id] = self._data_handlers[channel_name]\n #\n # # Create a channel_name, symbol tuple to identify channels of same type\n # if 'key' in config:\n # identifier = (channel_name, symbol, config['key'].split(':')[1])\n # else:\n # identifier = (channel_name, symbol)\n # self.channel_handlers[channel_id] = identifier\n # self.channel_directory[identifier] = channel_id\n # self.channel_directory[channel_id] = identifier\n # self.log.info(\"Subscription succesful for channel %s\", identifier)\n #\n # def _handle_unsubscribed(self, dtype, data, ts):\n # \"\"\"Handles responses to unsubscribe() commands.\n # Removes a channel id from the client.\n # :param dtype:\n # :param data:\n # :param ts:\n # :return:\n # \"\"\"\n # self.log.debug(\"_handle_unsubscribed: %s - %s - %s\", dtype, data, ts)\n # channel_id = data.pop('chanId')\n #\n # # Unregister the channel from all internal attributes\n # chan_identifier = self.channel_directory.pop(channel_id)\n # self.channel_directory.pop(chan_identifier)\n # self.channel_handlers.pop(channel_id)\n # self.last_update.pop(channel_id)\n # self.log.info(\"Successfully unsubscribed from %s\", chan_identifier)\n #\n # def _handle_auth(self, dtype, data, ts):\n # \"\"\"Handles authentication responses.\n # :param dtype:\n # :param data:\n # :param ts:\n # :return:\n # \"\"\"\n # # 
Contains keys status, chanId, userId, caps\n # if dtype == 'unauth':\n # raise NotImplementedError\n # channel_id = data.pop('chanId')\n # user_id = data.pop('userId')\n #\n # identifier = ('auth', user_id)\n # self.channel_handlers[identifier] = channel_id\n # self.channel_directory[identifier] = channel_id\n # self.channel_directory[channel_id] = identifier\n\n # def _handle_trades(self, dtype, data, ts):\n # \"\"\"Files trades in self._trades[chan_id].\n # :param dtype:\n # :param data:\n # :param ts:\n # :return:\n # \"\"\"\n # self.log.debug(\"_handle_trades: %s - %s - %s\", dtype, data, ts)\n # channel_id, *data = data\n # channel_identifier = self.channel_directory[channel_id]\n # entry = (data, ts)\n # self.trades[channel_identifier].put(entry)\n\n def _send_auth(self):\n # Generate nonce\n auth_nonce = str(int(time.time() * 10000000))\n # Generate signature\n auth_payload = \"AUTH\" + auth_nonce\n auth_sig = hmac.new(self._api_secret.encode(), auth_payload.encode(),\n hashlib.sha384).hexdigest()\n\n payload = {\"event\": \"auth\", \"apiKey\": self._api_key, \"authSig\": auth_sig,\n \"authPayload\": auth_payload, \"authNonce\": auth_nonce}\n\n self._send(payload)\n\n\n# # Auth v1:\n# import hmac\n# import hashlib\n# import time\n#\n# nonce = int(time.time() * 1000000)\n# auth_payload = \"AUTH\" + str(nonce)\n# signature = hmac.new(\n# API_SECRET.encode(),\n# msg = auth_payload.encode(),\n# digestmod = hashlib.sha384\n# ).hexdigest()\n#\n# payload = {\n# \"apiKey\": API_KEY,\n# \"event\": \"auth\",\n# \"authPayload\": auth_payload,\n# \"authNonce\": nonce,\n# \"authSig\": signature\n# }\n#\n# ws.send(json.dumps(payload))\n\n# https://github.com/bitfinexcom/bitfinex-api-node\n# How do te and tu messages differ?\n# A te packet is sent first to the client immediately after a trade has been\n# matched & executed, followed by a tu message once it has completed processing.\n# During times of high load, the tu message may be noticably delayed, and as\n# such only the te message should be used for a realtime feed.\n" } ]
18
alenrooni/sentanal
https://github.com/alenrooni/sentanal
daa5ed0e9540fd1c78d8bb4f1dac87350397fdd3
7f0e4ac8475f5a82d7a020ed35571a9cc8023e5d
08c0047c448a5fb7c422a6d82f5c003bfd0ccb05
refs/heads/master
2021-01-22T20:07:58.533935
2013-11-22T11:43:15
2013-11-22T11:43:15
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6468750238418579, "alphanum_fraction": 0.668749988079071, "avg_line_length": 19.46666717529297, "blob_id": "4af0b904b47c4fb45d87934e9a0552d109687200", "content_id": "ef993ede3d955c8a13204b14a0a9bd0e4e487719", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 320, "license_type": "no_license", "max_line_length": 107, "num_lines": 15, "path": "/twittertest/tweetstreamtest.py", "repo_name": "alenrooni/sentanal", "src_encoding": "UTF-8", "text": "'''\r\nCreated on Nov 22, 2013\r\n\r\n@author: usr\r\n'''\r\nimport tweetstream\r\n\r\nwords = words = [\"opera\", \"firefox\", \"safari\", \"ie\", \"chrome\"]\r\npeople = None\r\nlocations = None\r\n\r\n\r\nstream = tweetstream.FilterStream(\"alenrooni\", \"M0sht@ri\", track=words, follow=people, locations=locations)\r\nfor tweet in stream:\r\n print tweet" }, { "alpha_fraction": 0.6674268245697021, "alphanum_fraction": 0.6735081672668457, "avg_line_length": 24.565656661987305, "blob_id": "0c6ab3d8c0fcc18abc180e14894ef497e85d601b", "content_id": "b1245dc3c93354c203de347e1afea9c56e8ff551", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2631, "license_type": "no_license", "max_line_length": 91, "num_lines": 99, "path": "/twittertest/twittertest.py", "repo_name": "alenrooni/sentanal", "src_encoding": "UTF-8", "text": "'''\r\nCreated on Nov 21, 2013\r\n\r\n@author: usr\r\n'''\r\n#!/usr/bin/env python\r\n\r\n'''Post a message to twitter'''\r\n\r\n__author__ = '[email protected]'\r\n\r\nimport ConfigParser\r\nimport getopt\r\nimport os\r\nimport sys\r\nimport twitter\r\n\r\n\r\nUSAGE = '''Usage: tweet [options] message\r\n\r\n This script posts a message to Twitter.\r\n\r\n Options:\r\n\r\n -h --help : print this help\r\n --consumer-key : the twitter consumer key\r\n --consumer-secret : the twitter consumer secret\r\n --access-key : the twitter access token key\r\n --access-secret : the twitter access token secret\r\n --encoding : the character set encoding used in input strings, e.g. \"utf-8\". [optional]\r\n\r\n Documentation:\r\n\r\n If either of the command line flags are not present, the environment\r\n variables TWEETUSERNAME and TWEETPASSWORD will then be checked for your\r\n consumer_key or consumer_secret, respectively.\r\n\r\n If neither the command line flags nor the enviroment variables are\r\n present, the .tweetrc file, if it exists, can be used to set the\r\n default consumer_key and consumer_secret. 
The file should contain the\r\n following three lines, replacing *consumer_key* with your consumer key, and\r\n *consumer_secret* with your consumer secret:\r\n\r\n A skeletal .tweetrc file:\r\n\r\n [Tweet]\r\n consumer_key: *consumer_key*\r\n consumer_secret: *consumer_password*\r\n access_key: *access_key*\r\n access_secret: *access_password*\r\n\r\n'''\r\n\r\nclass TweetRc(object):\r\n def __init__(self):\r\n self._config = None\r\n\r\n def GetConsumerKey(self):\r\n return self._GetOption('consumer_key')\r\n\r\n def GetConsumerSecret(self):\r\n return self._GetOption('consumer_secret')\r\n\r\n def GetAccessKey(self):\r\n return self._GetOption('access_key')\r\n\r\n def GetAccessSecret(self):\r\n return self._GetOption('access_secret')\r\n\r\n def _GetOption(self, option):\r\n try:\r\n return self._GetConfig().get('Tweet', option)\r\n except:\r\n return None\r\n\r\n def _GetConfig(self):\r\n if not self._config:\r\n self._config = ConfigParser.ConfigParser()\r\n print 'reading config file'\r\n self._config.read('.tweetrc')\r\n return self._config\r\n\r\n\r\nrc = TweetRc()\r\nconsumer_key = rc.GetConsumerKey()\r\nconsumer_secret = rc.GetConsumerSecret()\r\naccess_key = rc.GetAccessKey()\r\naccess_secret = rc.GetAccessSecret()\r\nprint access_key\r\n\r\napi = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret,\r\n access_token_key=access_key, access_token_secret=access_secret,\r\n input_encoding='utf-8')\r\n\r\n\r\n#status = api.PostUpdate('hhhhhhhhhhhhhhhhh')\r\ntws = api.GetTrendsWoeid(12789690)\r\nfor tw in tws:\r\n print tw\r\n " } ]
2
DebeshNayak/test
https://github.com/DebeshNayak/test
4f865c18d70e2ed1fdd1f22ab952199870140d93
21628b9923eb07d321bf7eb560902064992f2cbe
b6dcadd96268e40cece9a4bcfec3c4d157b6f3ff
refs/heads/main
2023-01-28T23:29:38.934650
2020-12-05T15:32:04
2020-12-05T15:32:04
76,848,855
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8163265585899353, "alphanum_fraction": 0.8163265585899353, "avg_line_length": 23.5, "blob_id": "bbdc45474257c6971a04745d5cd54346da08a2c4", "content_id": "7aece975ead11ee6dc2bf6ddb5a001d1298b7092", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49, "license_type": "no_license", "max_line_length": 30, "num_lines": 2, "path": "/feature.py", "repo_name": "DebeshNayak/test", "src_encoding": "UTF-8", "text": "Some changes on feature branch\nnew line is added\n" } ]
1
saurabh0010/Git-Commands
https://github.com/saurabh0010/Git-Commands
6b815e90ba693e035f8b0725b303f81f94b0f13a
bbbf0359eebaab1a4e85c77f14383026a1c2a6f3
5a08ae4fbc9f7d232759ac37e4e57643cb851b32
refs/heads/master
2021-09-04T21:35:55.599268
2018-01-22T12:00:25
2018-01-22T12:00:25
117,953,300
0
8
null
null
null
null
null
[ { "alpha_fraction": 0.6499999761581421, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 19, "blob_id": "9aeae751b34252ec3995843e0fc40947b3db3933", "content_id": "ae87d055e4b648e4b351bac9e8bee0d374abedc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20, "license_type": "no_license", "max_line_length": 19, "num_lines": 1, "path": "/git1.py", "repo_name": "saurabh0010/Git-Commands", "src_encoding": "UTF-8", "text": "print(\"I love Git\")\n" } ]
1
lukejuusola/AoCMM2016-Traffic
https://github.com/lukejuusola/AoCMM2016-Traffic
d6ac286598d697cffcc13fbf74d551588e0fc681
ed69aa93af9459f7152257e5b9dea1b16555ceaa
81f6f2ced384b98d6ab9acbfab377769a3f8d3d5
refs/heads/master
2021-03-27T15:54:45.682915
2016-10-18T22:45:07
2016-10-18T22:45:07
70,879,866
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5394088625907898, "alphanum_fraction": 0.5763546824455261, "avg_line_length": 21.55555534362793, "blob_id": "d814534fbaf444785b02cc43eea18d5d1b77d09d", "content_id": "3a11c460b48ff261afa7b784dfb3a872b02fccd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 406, "license_type": "no_license", "max_line_length": 61, "num_lines": 18, "path": "/MaxValue.py", "repo_name": "lukejuusola/AoCMM2016-Traffic", "src_encoding": "UTF-8", "text": "import numpy as np\nimport math\n\ndef MaxValue(f, X, Y):\n\tXl, Yl = np.meshgrid(X, Y)\n\tvf = np.vectorize(f)\n\tZ = vf(Xl, Yl)\n\tindex = np.argmax(Z)\n\tx_in = math.floor(index/50)\n\ty_in = index%50\n\treturn (X[x_in], Y[y_in], Z[x_in][y_in])\n\n\nif __name__ == '__main__':\n\tx_mean = .84\n\ty_mean = .12\n\tf = lambda x,y: 1 - math.sqrt((x-x_mean)**2 + (y-y_mean)**2)\n\tprint(MaxValue(f, np.linspace(0,1), np.linspace(0,1)))\n" }, { "alpha_fraction": 0.5511398911476135, "alphanum_fraction": 0.594577968120575, "avg_line_length": 30.568628311157227, "blob_id": "fdeeb00ed0d38bb6b611a984601d8185c9332c67", "content_id": "f9484d872f03ba3460514df50f7c90448e5fb017", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3246, "license_type": "no_license", "max_line_length": 105, "num_lines": 102, "path": "/gradient.py", "repo_name": "lukejuusola/AoCMM2016-Traffic", "src_encoding": "UTF-8", "text": "from CrashMap import CrashMap\nimport numpy as np\nimport scipy\nfrom Plot import plot\nimport random\n\nm_x = (-10,10)\nm_y = (-10,10)\nstdx = 1\nstdy = 1\n\ndef fscore(f1, f2):\n return lambda x,y: (f1(x,y) - f2(x,y))**2\n\ndef calcInt(f):\n score, error =scipy.integrate.quadpack.dblquad(f, m_x[0], m_x[1], lambda x: m_y[0], lambda x: m_y[1])\n return score\n\n\n\ndef calcGradient(ambulances, crashmap):\n delx = 0.1\n amb_map = CrashMap(ambulances, stdx, stdy)\n score = calcInt(fscore(crashmap(x,y), amb_map(x,y)))\n deltas = []\n for i in range(0, len(ambulances)):\n x0, y0 = ambulances[i]\n ambulances[i] = (x0 - delx, y0)\n mapx0 = CrashMap(ambulances, stdx, stdy)\n #plot (lambda x,y: (crashmap(x,y) - amb_map(x,y))**2, -5, 5, -5, 5)\n ambulances[i] = (x0 + delx, y0)\n mapx1 = CrashMap(ambulances, stdx, stdy)\n #plot (lambda x,y: (crashmap(x,y) - amb_map(x,y))**2, -5, 5, -5, 5)\n ambulances[i] = (x0, y0-delx)\n mapy0 = CrashMap(ambulances, stdx, stdy)\n ambulances[i] = (x0, y0+delx)\n mapy1 = CrashMap(ambulances, stdx, stdy)\n ambulances[i] = (x0, y0)\n \n scoreX0 = calcInt(fscore(crashmap(x,y), mapx0(x,y)))\n scoreX1 = calcInt(fscore(crashmap(x,y), mapx1(x,y)))\n scoreY0 = calcInt(fscore(crashmap(x,y), mapy0(x,y)))\n scoreY1 = calcInt(fscore(crashmap(x,y), mapy1(x,y)))\n \n dx = (scoreX1 - scoreX0)/(2*delx)\n dy = (scoreY1 - scoreY0)/(2*delx)\n deltas.append((dx, dy))\n return deltas\n\ndef update(ambulances, crashmap, rate):\n #rate = 10\n grad = calcGradient(ambulances, crashmap)\n for i in range(0, len(ambulances)):\n x0, y0 = ambulances[i]\n x1 = x0 - rate * grad[i][0]\n y1 = y0 - rate * grad[i][1]\n ambulances[i] = (x1, y1)\n return grad\n\nnum_amb = 3\nnum_crashes = 4\n\nambulances = []\ncrashes = []\nfor i in range(0, num_amb):\n ambulances.append((random.uniform(-5,5), random.uniform(-5,5)))\nfor i in range(0, num_crashes):\n crashes.append((random.uniform(-5,5), random.uniform(-5,5)))\n\ncrashmap = CrashMap(crashes, stdx, stdy)\nplot(crashmap, -6, 6, -6, 6)\namb_map = CrashMap(ambulances, stdx, stdy)\n#gradient = 
calcGradient(ambulances, crashmap)\nscore = calcInt(fscore(amb_map, crashmap))\nlastscore = 0\n#print abs(lastscore - score)/score\ncount = 0\ngradsum = 1\nrate = 10\nwhile(gradsum > 10**-8):\n #amb_map = CrashMap(ambulances, stdx, stdy)\n #plot(lambda x,y: (amb_map(x,y) - crashmap(x,y))**2, -10, 10, -10, 10)\n grads = update(ambulances, crashmap, rate)\n gradsum = sum(abs(x)+ abs(y) for x,y in grads)\n lastscore = score\n score = calcInt(fscore(CrashMap(ambulances, stdx, stdy), crashmap))\n if score < lastscore:\n print abs(score-lastscore)/score\n rate *= 1 + 10*abs(score - lastscore)/score\n else:\n print \"reset\"\n rate *= 0.5\n count += 1;\n if count % 10 == 0:\n print ambulances\n print gradsum\n print rate\n #print lastscore, score\n #print (lastscore-score)/score\nplot(crashmap, -10, 10, -10, 10)\nplot(CrashMap(ambulances, stdx, stdy), -10, 10, -10, 10)\nplot(fscore(CrashMap(ambulances, stdx, stdy),crashmap), -10, 10, -10, 10)\n \n\n \n \n\n \n" }, { "alpha_fraction": 0.6709359884262085, "alphanum_fraction": 0.6778324842453003, "avg_line_length": 35.25, "blob_id": "bb475f291bc9bbe78e15749f3b0eecca265007f3", "content_id": "e7bced08f3a218451f5f08a41c58c7fcdf4e8020", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1015, "license_type": "no_license", "max_line_length": 114, "num_lines": 28, "path": "/readin.py", "repo_name": "lukejuusola/AoCMM2016-Traffic", "src_encoding": "UTF-8", "text": "import numpy\n\n#Finds number of lines in file\ndef file_len(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return i + 1\n# Reads the data in \"filein\" into a matrix. \"num\" is the number of data points per line.\n# For raw output (files of form \"out__.txt\") num = 4. The coordinates are (latitude, longitude, #casualties, hour)\n# For nodelist, num = 3. Coordinates: (nodeID, longitude, latitude)\n# For node-accident assignment files (files of form \"node__.txt\") num = 2. 
Coordinates: (nodeID, #accidents)\n# Data is returned as a matrix with each column as a different node/accident, and each row n as all the\n# nth coordinate values of the data set.\ndef readData(filein, num):\n\tfilename = filein\n\tpoints = file_len(filename)\n\tdata = numpy.zeros((num, points))\n\tind = 0\n\tf = open(filename)\n\tfor line in f:\n\t\ttemp = line.split()\n\t\ttempFl = numpy.zeros((num, 1))\n\t\tfor x in range(num):\n\t\t\ttempFl[x] = float(temp[x])\n\t\tdata[:, ind] = numpy.transpose(tempFl)\n\t\tind += 1\n\treturn data\n" }, { "alpha_fraction": 0.626158595085144, "alphanum_fraction": 0.6735324263572693, "avg_line_length": 27.52941131591797, "blob_id": "94834fa5b27b3c9c050d2587008d58fac94d445c", "content_id": "7c71c774f8f094678f64ab07ad5afab8472bdab0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 971, "license_type": "no_license", "max_line_length": 76, "num_lines": 34, "path": "/PointPicker.py", "repo_name": "lukejuusola/AoCMM2016-Traffic", "src_encoding": "UTF-8", "text": "from CrashMap import CrashMap\nfrom MaxValue import MaxValue\nimport numpy as np\nfrom Plot import plot\n\namb_std = 2\ncrash_std = 1\n\ndef MapDifference(f, h):\n\tdef difference(x,y):\n\t\treturn f(x,y) - h(x,y)\n\treturn difference\n\ndef NaiveContinuousSolution(crashes, totalPicked):\n\tpicks = []\n\tcrashMap = CrashMap(crashes, crash_std, crash_std)\n\tX = np.linspace(-5, 5)\n\tY = np.linspace(-5, 5)\n\tfor i in range(totalPicked):\n\t\theatMap = crashMap\n\t\tif len(picks) != 0:\n\t\t\tambMap = CrashMap(picks, amb_std, amb_std)\n\t\t\theatMap = MapDifference(crashMap, ambMap)\n\t\tpicks.append(MaxValue(heatMap, X, Y)[:2])\n\treturn picks\n\nif __name__ == '__main__':\n\ttest_crashes = [(1.25,1.25),(1.75,1.75), (1.25,1.75), (1.75,1.25), (-2,-2)]\n\t# totalPicked is a required argument; place one pick per crash here\n\tpicks = NaiveContinuousSolution(test_crashes, len(test_crashes))\n\tambMap = CrashMap(picks, amb_std, amb_std)\n\tcrashMap = CrashMap(test_crashes, crash_std, crash_std)\n\tplot(crashMap, -5, 5, -5, 5)\n\tplot(ambMap, -5, 5, -5, 5)\n\tplot(MapDifference(crashMap, ambMap), -5, 5, -5, 5)\n\n" }, { "alpha_fraction": 0.4749999940395355, "alphanum_fraction": 0.6875, "avg_line_length": 15, "blob_id": "65bf7a13be21876bd3ecf9e2c1d2215a59e34e53", "content_id": "b7ba31ec9ceed1f8ea61a16a11f846161e1761e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 80, "license_type": "no_license", "max_line_length": 20, "num_lines": 5, "path": "/Constants.py", "repo_name": "lukejuusola/AoCMM2016-Traffic", "src_encoding": "UTF-8", "text": "stdy = 1.0\nstdx = 1.0\nsafetyWeight = 500.0\nambCost = 5000.0\npriceWeight = 100.0\n" }, { "alpha_fraction": 0.6352739930152893, "alphanum_fraction": 0.6558219194412231, "avg_line_length": 29.736841201782227, "blob_id": 
"af4e0b95e70136665200ccf7f3a516d5a21d61bf", "content_id": "0336bb8cd0b9696182630af9a13bdc58248d952b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 584, "license_type": "no_license", "max_line_length": 70, "num_lines": 19, "path": "/Plot.py", "repo_name": "lukejuusola/AoCMM2016-Traffic", "src_encoding": "UTF-8", "text": "from CrashMap import CrashMap\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\ndef plot(f, leftX, rightX, leftY, rightY):\n\tfig = plt.figure()\n\tax = fig.gca(projection='3d')\n\tvf = np.vectorize(f)\n\tX = np.linspace(leftX, rightX, 100)\n\tY = np.linspace(leftY, rightY, 100)\n\tX, Y = np.meshgrid(X, Y)\n\tZ = vf(X, Y)\n #surf = ax.contourf(X, Y, Z)\n #plt.contourf(X, Y, Z)\n\tsurf = ax.plot_surface(X,Y,Z, rstride=2, cstride=2, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n\tplt.show()\n" }, { "alpha_fraction": 0.6207839846611023, "alphanum_fraction": 0.6709206700325012, "avg_line_length": 26.325000762939453, "blob_id": "1d630e28d47a03a927612f95c369a0a9aad230bc", "content_id": "626f8d76ce295c5bb90ef5bd1ad1a47b86dd4834", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1097, "license_type": "no_license", "max_line_length": 82, "num_lines": 40, "path": "/NaiveContinuousComplete.py", "repo_name": "lukejuusola/AoCMM2016-Traffic", "src_encoding": "UTF-8", "text": "from PointPicker import *\nfrom CrashMap import CrashMap\nfrom readin import *\nfrom scipy.integrate import dblquad\nimport matplotlib.pyplot as plt\nimport random\n\nx1 = -5\nx2 = 5\ny1 = -5\ny2 = 5\nambCost = 2\nmax_n = 20\n\ndef Score(crashes, ambulances):\n\tif(len(ambulances) == 0 or len(crashes) == 0):\n\t\treturn\n\tcrashMap = CrashMap(crashes, crash_std, crash_std)\n\tambMap = CrashMap(ambulances, amb_std, amb_std)\n\tSquareDiff = lambda x, y: (MapDifference(crashMap, ambMap)(x,y))**2\n\tsafety = 1./dblquad(SquareDiff, x1, x2, lambda x: y1, lambda x: y2)[0]\n\t#return safety - len(ambulances)*ambCost \n\treturn safety\n\ndef FindOptimumN(crashes):\n\tret = []\n\tfor n in range(1,max_n):\n\t\tambs = NaiveContinuousSolution(crashes, n)\n\t\tret.append((n, Score(crashes, ambs)))\n\treturn ret\n\nif __name__ == '__main__':\n\tcrashes = []\n\t#crashes = [(1.25,1.25),(1.75,1.75), (1.25,1.75), (1.75,1.25), (-2,-2)]\n\tfor i in range(50):\n\t\tcrashes.append((random.randint(-2, 2), random.randint(-2, 2)))\n\n\tpoints = FindOptimumN(crashes)\n\tplt.scatter(list(map(lambda x: x[0], points)), list(map(lambda x: x[1], points)))\n\tplt.show()\n\t\n\t\n" }, { "alpha_fraction": 0.6415694355964661, "alphanum_fraction": 0.6670201420783997, "avg_line_length": 29.29032325744629, "blob_id": "39e3a21f5b2b1413e8e227ca0cbe797865825675", "content_id": "1cef8660ed43dab42ff1b667fda4429e563b4f68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 943, "license_type": "no_license", "max_line_length": 86, "num_lines": 31, "path": "/CrashMap.py", "repo_name": "lukejuusola/AoCMM2016-Traffic", "src_encoding": "UTF-8", "text": "import math\nfrom scipy.integrate import dblquad, IntegrationWarning\nimport numpy as np\nimport warnings\nimport copy\nwarnings.simplefilter(\"ignore\", IntegrationWarning)\nwarnings.simplefilter(\"ignore\", UserWarning)\n\nmanhattan_x = (-10, 10)\nmanhattan_y = (-10, 10)\n\n#Assume dataset is in form [(x_0, y_0), ..., (x_n, y_n)] where x, y is gps coordinates\ndef 
CrashMap(dataset, stdx, stdy):\n\tnew_dataset = copy.deepcopy(dataset)\n\tdef freqMap(x, y):\n\t\tz = 0.0\n\t\tC = 1.0 # Normalization constant. Definitely needs to be tweeked\n\t\t# Should just be able to divide in the end.\n\t\tfor x_i,y_i in new_dataset:\n\t\t\tdx = (x - x_i)\n\t\t\tdy = (y - y_i)\n\t\t\texponent = -(dx**2/(2*stdx**2) + dy**2/(2*stdy**2))\n\t\t\tz += C*math.exp(exponent)\n\t\treturn z\n\tnorm_c, error = dblquad(freqMap, manhattan_x[0], manhattan_x[1],\\\n\t\t\t\t\t\t\t\tlambda x: manhattan_y[0],\\\n\t\t\t\t\t\t\t\tlambda x: manhattan_y[1])\n\tdef normedFreqMap(x,y):\n\t\treturn freqMap(x,y)/norm_c\n\n\treturn normedFreqMap\n\n\n\n\n" } ]
9
Space-Odie/Zybo_Projects
https://github.com/Space-Odie/Zybo_Projects
a92ed0885d4b9cdbef93dfbddc6589763452ad57
a5a1690bbcdfa6c849f3a48ffc891298a794d6ee
417c83640301af3e55c054d2eb7d83a2a05d465b
refs/heads/main
2023-05-02T03:43:31.726020
2021-05-19T02:47:07
2021-05-19T02:47:07
368,724,711
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.45922747254371643, "alphanum_fraction": 0.49041488766670227, "avg_line_length": 23.88888931274414, "blob_id": "9944325d58346e0a5da4cdb964d6ccb24acde969", "content_id": "d50f91d3f7766c022c585da9d61e92d334c6fd7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3495, "license_type": "no_license", "max_line_length": 75, "num_lines": 135, "path": "/Zybo_MiniProjects/Labs_SourceCode/Lab3.c", "repo_name": "Space-Odie/Zybo_Projects", "src_encoding": "UTF-8", "text": "/*----------------------------------------------------------\r\n 520 L Lab 3 Spring 2020\r\n -----------------------------------------------------------\r\n File Name: lab3.c\r\n Author: Ian O'Donnell\r\n -----------------------------------------------------------\r\n Version Date Description\r\n 1.0 3-10-2020 Initial Release\r\n -----------------------------------------------------------\r\n Purpose:\r\n Use a Multiplier that is created in VHDL.\r\n Multiplier is written to with one 32 bit input that has\r\n two 16 bit inputs combined together.\r\n Output is one 32 bit output that can be read from.\r\n-----------------------------------------------------------*/\r\n\r\n\r\n#include <stdio.h>\r\n#include \"xbasic_types.h\"\r\n#include \"platform.h\"\r\n#include \"xil_printf.h\"\r\n\r\n#include \"xparameters.h\"\r\n#include \"xstatus.h\"\r\n#include <time.h>\r\n#include <complex.h>\r\n\r\n//Base address of multiplier\r\nXuint32 *baseaddr_p = (Xuint32 *)XPAR_MY_VHDLMULTIPLIER_0_S00_AXI_BASEADDR;\r\n\r\n\r\nint RandGen(int max_value)\r\n{\r\n\treturn 1 + rand() % max_value;\r\n\r\n}\r\n\r\nvoid TaskTwo()\t//Calculate (A +jB)(C + jD)\r\n{\r\n\tprintf(\"Task Two\\n\");\r\n\r\n\tint Max_Value = 15;\r\n\tint A,B,C,D, AC, BD, AD, BC;\r\n\tdouble real, imag;\r\n\r\n\r\n\t//TODO Gather A , B , C and D Values\r\n\tA = RandGen(Max_Value);\r\n\tB = RandGen(Max_Value);\r\n\tC = RandGen(Max_Value);\r\n\tD = RandGen(Max_Value);\r\n\txil_printf(\"A = %d | B = %d j| C = %d | D = %d j\\n\", A, B, C, D);\r\n\r\n\t//TODO Read/Write Values to Memory (uncomment code for troubleshooting)\r\n\t//AC\r\n\t*(baseaddr_p+0) = A <<16 | C;\r\n\t//xil_printf(\"AC Wrote: 0x%08x \\n\", *(baseaddr_p+0));\r\n\t//xil_printf(\"AC Read: 0x%08x \\n\\r\", *(baseaddr_p+1));\r\n\tAC = *(baseaddr_p+1);\r\n\t//BD\r\n\t*(baseaddr_p+0) = B <<16 | D;\r\n\t//xil_printf(\"BD Wrote: 0x%08x \\n\", *(baseaddr_p+0));\r\n\t//xil_printf(\"BD Read: 0x%08x \\n\\r\", *(baseaddr_p+1));\r\n\tBD = *(baseaddr_p+1);\r\n\t//AD\r\n\t*(baseaddr_p+0) = A <<16 | D;\r\n\t//xil_printf(\"AD Wrote: 0x%08x \\n\", *(baseaddr_p+0));\r\n\t//xil_printf(\"AD Read: 0x%08x \\n\\r\", *(baseaddr_p+1));\r\n\tAD = *(baseaddr_p+1);\r\n\t//BC\r\n\t*(baseaddr_p+0) = B <<16 | C;\r\n\t//xil_printf(\"BC Wrote: 0x%08x \\n\", *(baseaddr_p+0));\r\n\t//xil_printf(\"BC Read: 0x%08x \\n\\r\", *(baseaddr_p+1));\r\n\tBC = *(baseaddr_p+1);\r\n\r\n\t//TODO Write Output Files to variables\r\n\r\n\r\n\txil_printf(\"AC = %d BD = %d | AD = %d j| BC = %d j\\n\", AC, BD, AD, BC);\r\n\txil_printf(\"%d - %d + %d j + %d j \\n\", AC, BD, AD, BC);\r\n\t//Calculate Real Portion\r\n\treal = AC - BD;\r\n\t//Calculate Complex number\r\n\timag = AD + BC;\r\n\t//Calculate the Result\r\n\tdouble complex z = real + imag * I;\r\n\r\n\t//printf(\"Real: %f\\n\", real);\r\n\t//printf(\"imag: %f\\n\", imag);\r\n\tprintf(\"result = %.1f% + .1fi\\n\", creal(z), cimag(z));\r\n\r\n\r\n\r\n}\r\n\r\nvoid TaskOne()\r\n{\r\n\r\n\t\tprintf(\"Task One\\n\");\r\n\t\tint Max_Value = 
9999;\r\n\t\tint a = 0; int b = 1;\r\n\t\tfor (int i=0; i<101; i++)\r\n\t\t{\r\n\t\t\tprintf(\" Test: %d\\n\", i);\r\n\t\t\tprintf(\"------------------------\\n\");\r\n\r\n\t\t\tint value = RandGen(Max_Value)<<16 | RandGen(Max_Value);\r\n\t\t\t*(baseaddr_p+a) = value;\r\n\r\n\t\t\txil_printf(\"MWrote: 0x%08x \\n\", *(baseaddr_p+a));\r\n\t\t\txil_printf(\"Read : 0x%08x \\n\", *(baseaddr_p+b));\r\n\r\n\t\t\ta = a + 4;\t//every even register = read\r\n\t\t\tb = b + 4;\t//every odd register = write\r\n\r\n\t\t\tprintf(\"------------------------\\n\\r\");\r\n\t\t}\r\n}\r\n\r\nint main()\r\n{\r\n\tinit_platform();\r\n\txil_printf(\"Multiplier Test\\n\\r\");\r\n\tTaskOne();\r\n\r\n\tfor (int i=0; i<101; i++)\r\n\t{\r\n\t\tprintf(\" Test: %d\\n\", i);\r\n\t\tprintf(\"------------------------\\n\");\r\n\t\tTaskTwo();\r\n\t\tprintf(\"------------------------\\n\\r\");\r\n\t}\r\n\treturn 0;\r\n\r\n}\r\n" }, { "alpha_fraction": 0.52542644739151, "alphanum_fraction": 0.5362085700035095, "avg_line_length": 33.50857162475586, "blob_id": "c30c74bac83bff2ef19edd45794f9f85fba85c55", "content_id": "19a5eaf8430e2a549285edc94d90492a0aac257e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6214, "license_type": "no_license", "max_line_length": 125, "num_lines": 175, "path": "/Zybo_MiniProjects/MP1/Code/Mini_Proj_1.c", "repo_name": "Space-Odie/Zybo_Projects", "src_encoding": "UTF-8", "text": "/*--------------------------------------------------------------\r\n 526 L\t\t\t\t\tMini Project\t\t\t\tSpring 2021\r\n----------------------------------------------------------------\r\nLAB Title: Final Exam Take Home Mini Project 1\r\nFile Name: Mini_Project.c\r\nAuthor: Ian O'Donnell\r\n----------------------------------------------------------------\r\nVersion\t\t\t\t\tDate\t\t\t\t\tDescription\r\n1.0 \t\t\t3/25/21\t \t\t Initial Release\r\n----------------------------------------------------------------\r\nPurpose:\r\n\tgenerates a periodic waveform defined by five parameters:\r\n\t1. Final value, 2. Drop value, 3. Step Horizontal value,\r\n\t4. Step Vertical Value, 5. 
Period\r\n---------------------------------------------------------------*/\r\n#include <stdio.h>\r\n#include \"xbasic_types.h\"\r\n#include \"platform.h\"\r\n#include \"xil_printf.h\"\r\n\r\n#include \"xparameters.h\"\r\n#include \"xstatus.h\"\r\n#include <time.h>\r\n#include \"sleep.h\"\r\n\r\n//write to BRAM: Xil_Out16(address, value);\r\n//read from BRAM: int num = Xil_In16(address);\r\n/*###############################################################################################################\r\n#\tPurpose: Creates a delay for the program - Could also use sleep command\r\n###############################################################################################################*/\r\nvoid DELAY(int delay)\r\n{\r\n\r\n\tfor (volatile int Delay=0; Delay<delay; Delay++);\r\n}\r\n\r\n/*###############################################################################################################\r\n#\tPurpose: \tTells user if input values will generate an error \r\n#\tInput: \t\tUser Input Values\r\n#\tOutput: \tPrint error message \r\n###############################################################################################################*/\r\n\r\nvoid Error_Handler(int Step_Horizontal_Value, int Step_Vertical_Value, int Period, int Drop_Value, int Final_Value)\r\n{\r\n//These Errors are not 'handled' as the design does not require it but the messages will display for the user.\r\n\tint Distance_Traveled = Drop_Value/Step_Vertical_Value * Step_Horizontal_Value;\r\n\r\n\tif (Distance_Traveled >= Period)\r\n\t{\r\n\t\tprintf( \"Period Value is too small, Final Value will not be reached. (Error: %d <= %d)\\n\", Period, Distance_Traveled );\r\n\t}\r\n\r\n\r\n\tif ( Step_Horizontal_Value > Period )\r\n\t{\r\n\t\tprintf(\"Horizontal Value must be less than the Period\\n\");\r\n\t}\r\n\r\n\tif ( Step_Vertical_Value > Drop_Value )\r\n\t{\r\n\t\tprintf( \"Vertical Step Value must be less than the Drop Value\\n\");\r\n\t}\r\n\r\n\tif (Drop_Value > Final_Value)\r\n\t{\r\n\t\tprintf( \"Drop Value is greater than your Final Value, Can not use negative values\");\r\n\t}\r\n}\r\n\r\n/*###############################################################################################################\r\n#\tPurpose: \tGenerate Waveform based on user input values. First Value will be the Final Value. \r\n\t\t\t\tThe wave form will ramp up, from the dropped value, until the final value is reached. \r\n\t\t\t\tThe X axis is the period and y value is the vertical value. \r\n#\tInput: \t\tUser Input Values\r\n#\tOutput: \tPrint error message \r\n###############################################################################################################*/\r\nint main()\r\n{\r\n\tshort int Final_Value, Drop_Value, Period, Step_Vertical_Value, Step_Horizontal_Value;\r\n\t//initialize\r\n\txil_printf(\"Mini Project!\\n\");\r\n\txil_printf(\"Gathering User Inputs. . 
.\\n\");\r\n/*\t//Hardcoded Values\r\n\tFinal_Value = 10000;\r\n\tDrop_Value = 8500;\r\n\tPeriod = 851;\r\n\tStep_Vertical_Value = 20;\r\n\tStep_Horizontal_Value = 2;\r\n*/\r\n\t//Allow User to Enter Values in:\r\n printf(\"Enter The Final Value: \");\r\n scanf(\"%hu\", &Final_Value);\r\n\r\n printf(\"Enter Drop_Value: \");\r\n scanf(\"%hu\", &Drop_Value);\r\n\r\n printf(\"Enter The Period: \");\r\n scanf(\"%hu\", &Period);\r\n\r\n printf(\"Enter The Step_Vertical_Value: \");\r\n scanf(\"%hu\", &Step_Vertical_Value);\r\n\r\n printf(\"Enter The Step_Horizontal_Value: \");\r\n scanf(\"%hu\", &Step_Horizontal_Value);\r\n\r\n\t//Error Handler\r\n\txil_printf(\"Checking for Errors . . .\\n\");\r\n\tError_Handler(Step_Horizontal_Value, Step_Vertical_Value, Period, Drop_Value, Final_Value);\r\n\r\n\r\n\t//Write to BRAM (Calculate Values)\r\n\txil_printf(\"Generating X Values. . .\\n\");\r\n\tint Current_Value = 0;\r\n\r\n\tint n = 2; //Step 2\r\n\tint H_Max = n *(Period / Step_Horizontal_Value);\t\t//number of iterations needed for runs\r\n\r\n\t//Write the Horizontal Values (X) to the BRAM (H_Max will always be greater or equal to V_Max?)\r\n\tfor (int j = 0; j <= H_Max; j = j + n)\r\n\t{\r\n\t\tif (j == 0)\r\n\t\t{\r\n\t\t\tCurrent_Value == 0;\r\n\t\t\tXil_Out16(j, Current_Value);\t\t\t\t\t\t\t//First value will always start at 0\r\n\t\t}\r\n\r\n\t\telse\r\n\t\t{\r\n\t\t\tCurrent_Value = Current_Value + Step_Horizontal_Value; \t//Additional Values will be incremented by the step value\r\n\t\t\tXil_Out16(j, Current_Value);\r\n\t\t}\r\n\r\n\t}\r\n\r\n\t//Write the Vertical Values (Y) to the BRAM (in address starting after the last horizontal value.\r\n\txil_printf(\"Generating Y Values. . .\\n\");\r\n\tint Y_Value = 0;\r\n\tint Y_Base = H_Max + n;\r\n\tfor (int i = Y_Base; i<= Y_Base*2; i = i + n)\r\n\t{\r\n\t\tif (i == Y_Base)\r\n\t\t{\r\n\t\t\tY_Value = Final_Value;\t\t\t\t\t\t\t\t\t\t//First Value is Final Value\r\n\t\t\tXil_Out16(i, Y_Value);\r\n\t\t}\r\n\t\tif (i == Y_Base + n)\r\n\t\t{\r\n\t\t\tY_Value = Final_Value - Drop_Value;\t\t\t\t\t\t\t//Second Value is Final Value - Drop Value\r\n\t\t\tXil_Out16(i, Y_Value);\r\n\t\t}\r\n\t\telse\r\n\t\t\tif ((Y_Value + Step_Vertical_Value) < Final_Value)\t\t\t//be looking one step ahead to verify it does not go over final value\r\n\t\t\t{\r\n\t\t\t\tY_Value = Y_Value + Step_Vertical_Value;\t\t\t\t//Additional Values will be incremented by the step value\r\n\t\t\t\tXil_Out16(i, Y_Value);\r\n\t\t\t}\r\n\t\t\telse \t\t\t\t\t\t\t\t\t\t\t\t\t\t//Don't increment Current_Value once it has reached the Final Value, alsi set it to Final Vlaue\r\n\t\t\t{\r\n\t\t\t\tXil_Out16(i, Final_Value);\r\n\t\t\t}\r\n\t}\r\n\r\n\t//Read Values (There is a chance that there are more X values than Y values, but never more Ys than Xs?\r\n\txil_printf(\"Final Value = %d, Drop Value = %d Period = %d, \\nStep Vertical Value = %d Step Horizontal Value = %d\\n\",\r\n\t\t\t\tFinal_Value, Drop_Value, Period, Step_Vertical_Value, Step_Horizontal_Value);\r\n\r\n\txil_printf(\" X , Y \\n\");\r\n\tfor (int k = 0; k <= H_Max + 1; k = k + n)\r\n\t{\r\n\t\tint H_Read = Xil_In16(k);\r\n\t\tint Y_Read = Xil_In16((Y_Base + k));\r\n\t\txil_printf(\" %d , %d \\n\",H_Read, Y_Read);\r\n\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.5775711536407471, "alphanum_fraction": 0.5984892249107361, "avg_line_length": 23.95652198791504, "blob_id": "9642b8e81ccaadc601e7344150ab1cdd5d152122", "content_id": "7c7e846d0e0d16c9e1686fdf662f4bc76dd456e3", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 1721, "license_type": "no_license", "max_line_length": 115, "num_lines": 69, "path": "/Zybo_MiniProjects/MP1/Code/Graphing_Script.py", "repo_name": "Space-Odie/Zybo_Projects", "src_encoding": "UTF-8", "text": "import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import StepPatch\n\nfrom tkinter import * \nfrom tkinter.filedialog import askopenfilename\n\nimport os\nimport math\n\n#Configure These Values\nx_data_points = 50 #Enter # Of X Data Points\nxlabel = 'Period' \nylabel = 'Value' \nTitle = 'Mini Project 1 - Test 3'\n\n#School Colors:\nBlack = '#000000'\nPatone = '#D22030'\n#Secondary Colors:\nCool_Gray = '#55565A'\n\n#Set Colors\ny_color = Black\nx_color = Black\ngraph_color = Patone\nTitle_Color = Patone\n\n\nfilepath = askopenfilename() \nprint(filepath)\nX_Axis = []\nY_Axis = []\nwith open(filepath, 'r') as csvfile:\n    plots = csv.reader(csvfile,delimiter =',',)\n    for i, row in enumerate(plots):\n        if i == 0: #Initialize Graph\n            Header = row #Grab the Header Descriptions\n            Total_Points = len(Header) - 1 #Calculate how many Y values there are \n\n        else:\n            x_obj = int(row[0])\n            X_Axis.append(x_obj)\n            y_obj = int(row[1])\n            Y_Axis.append(y_obj)\n\n\nstep = math.ceil(len(X_Axis)/x_data_points)\n\nfig, ax1 = plt.subplots()\n\n\nlines, labels = ax1.get_legend_handles_labels()\nax1.legend(lines, labels, loc = 0 )\nax1.set_xticklabels(X_Axis[::step], rotation=45)\nax1.tick_params(axis = 'y', labelcolor = y_color)\nax1.tick_params(axis = 'x', labelcolor = x_color)\nax1.set_xlabel(xlabel)\nax1.set_ylabel(ylabel)\nax1.set_title(Title, color=Title_Color)\n#t = plt.title(Title)\n\n\n\nplt.step(X_Axis, Y_Axis, color=graph_color)\nfig.tight_layout()\nplt.grid(True)\nplt.show()" }, { "alpha_fraction": 0.7464967966079712, "alphanum_fraction": 0.7583864331245422, "avg_line_length": 56.79999923706055, "blob_id": "53bd49bf251301f4d8ee293a81d7a953045abe7a", "content_id": "2651e19f5c4ea958a1617984bf3e6cfd1afe95dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2355, "license_type": "no_license", "max_line_length": 176, "num_lines": 40, "path": "/Zybo_MiniProjects/MP2/README.txt", "repo_name": "Space-Odie/Zybo_Projects", "src_encoding": "UTF-8", "text": "To Run this Project do the following:\r\n\r\n1) Open Vivado HLS\r\n2) Create a New Application and import source I2B.c found at Ian_Odonnell_MP2\\Source_Files\\HLS Project\r\n\r\n3) Click on Run Synthesis \r\n\r\n4) Pipelines can be added by clicking on the directive tab. The FOR loops are labeled to easily understand \r\n\twhere the constraints are being applied. \r\n\t\r\n5) Click on vivado_hls_compare_report.html located at Ian_Odonnell_MP2\\Source_Files\\HLS Project\r\n\tThis will display four different configurations using:\r\n\t\ta) No Constraint\r\n\t\tb) Pipeline \r\n\t\tc) Unrolling\r\n\t\td) Latency\r\n\r\n6) These directives were applied in the following format\r\n\t\ta) No Constraint - no directives added\r\n\t\tb) Pipeline - Pipeline (default) was added to main function\r\n\t\tc) unrolling - Unroll was added to the OUTER loop of each function\r\n\t\td) Latency - Latency (min 0; max 1) was added to the INNER loop of each function\r\n\t\t*NOTE: The Latency was applied to the OUTER loop while I was doing my verification video and hence, showed no change. 
\r\n\t\tI have since found the warning indicating that I am unable to apply the Latency directive to the OUTER loop and fixed it.\r\n\t\t\r\n\t\t\t\r\n\t\t\r\nObservations of Each Result:\r\n\t\ta) No Constraint shows a base model of the time required to run through all of the functions\r\n\t\tb) Initiation_Interval Solution (Pipeline) Shows that this constraint, when used on the main functions\t\r\n\t\t\tCan drasticatly bring down the required latency from 22571 to 2186 intervals. \r\n\t\t\tHowever, this came at a cost of requiring more harware than the Zybo-Z710 has. \r\n\t\tc) Unrolling did not have much of an affect when it was done on just the main function so It was applied to each of the outter loops.\r\n\t\t\tThis allowed for latency to be decreased to 19331 with a much less extensive hardware increase. \r\n\t\td) The latency has no effect on this design. \r\n\t\t\r\n\t\t\r\n\t\tIt was clear from this project that different pipelines can be done on different functions in order to meet certain specifications (required latency and hardware constraints)\r\n\t\tHowever, for this lab, no such timing or hardware specification was given. Because of this, finding the optimal solution could not be done because it is \r\n\t\tnot clear if there is a hardware or performance limit. But in general, a solution should be created that has a similiar hardware increase to timing decrease ratio. \r\n\t\t" }, { "alpha_fraction": 0.5818778276443481, "alphanum_fraction": 0.6027129888534546, "avg_line_length": 27.37239646911621, "blob_id": "4d9af1cc5924f8827f7213c239cdc4af2d7d7ac4", "content_id": "ad30e8391f3b80a95f0544c6db0c200e6adb6120", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 11281, "license_type": "no_license", "max_line_length": 124, "num_lines": 384, "path": "/Zybo_MiniProjects/Labs_SourceCode/Lab5.c", "repo_name": "Space-Odie/Zybo_Projects", "src_encoding": "UTF-8", "text": "/*----------------------------------------------------------\r\n 520 L Lab 5 Fall 2020\r\n -----------------------------------------------------------\r\n File Name: Interrupt.c\r\n Author: Ian O'Donnell\r\n -----------------------------------------------------------\r\n Version Date Description\r\n 1.0 3-31-2020 Initial Release\r\n 2.0 4-03-2020 Added Part 2-D requirements\r\n 3.0 4-03-2020 Added Part 3 requirements\r\n\r\n -----------------------------------------------------------\r\n Purpose: \r\n Create a project using buttons, a counter, and switches.\r\n The counter counts based on a set amount of time. \r\n The buttons and switches will interrupt this process. \r\n LED’s will display the current count.\r\n UART is used to display the function of each input and \r\n The current count. 
\r\n ----------------------------------------------------------*/\r\n#include \"xparameters.h\"\r\n#include \"xgpio.h\"\r\n#include \"xtmrctr.h\"\r\n#include \"xscugic.h\"\r\n#include \"xil_exception.h\"\r\n#include \"xil_printf.h\"\r\n\r\n#include <stdio.h>\r\n#include \"xbasic_types.h\"\r\n\r\n#include \"xil_printf.h\"\r\n\r\n#include \"xparameters.h\"\r\n#include \"xstatus.h\"\r\n#include <time.h>\r\n#include \"sleep.h\"\r\n\r\n// Parameter definitions\r\n#define INTC_DEVICE_ID \t\tXPAR_PS7_SCUGIC_0_DEVICE_ID\r\n#define TMR_DEVICE_ID\t\tXPAR_TMRCTR_0_DEVICE_ID\r\n#define BTNS_DEVICE_ID\t\tXPAR_AXI_GPIO_0_DEVICE_ID\r\n#define LEDS_DEVICE_ID\t\tXPAR_AXI_GPIO_1_DEVICE_ID\r\n#define SWITCH_DEVICE_ID\tXPAR_AXI_GPIO_2_DEVICE_ID\r\n#define INTC_GPIO_INTERRUPT_ID_1 XPAR_FABRIC_AXI_GPIO_0_IP2INTC_IRPT_INTR\r\n#define INTC_GPIO_INTERRUPT_ID_2 XPAR_FABRIC_AXI_GPIO_2_IP2INTC_IRPT_INTR\r\n#define INTC_TMR_INTERRUPT_ID XPAR_FABRIC_AXI_TIMER_0_INTERRUPT_INTR\r\n\r\n#define BTN_INT \t\t\tXGPIO_IR_CH1_MASK\r\n#define SW_INT \t\t\t\tXGPIO_IR_CH1_MASK\r\n#define TMR_LOAD\t\t\t0xF8000000\r\n#define TMR_LOAD_2\t\t\t0xFF000000 //0xF0000000 = double the time\r\n//define timer counter variables\r\n#define TMR_CNT_1 \t\t\t0xF0000\r\n#define TMR_CNT_2 \t\t\t0x3\r\n\r\nXGpio LEDInst, BTNInst, SWInst;\r\nXScuGic INTCInst;\r\nXTmrCtr TMRInst;\r\nstatic int led_data;\r\nstatic int btn_value;\r\nstatic int tmr_count;\r\nstatic int sw_value;\r\n\r\n//----------------------------------------------------\r\n// PROTOTYPE FUNCTIONS\r\n//----------------------------------------------------\r\nstatic void BTN_Intr_Handler(void *baseaddr_p);\r\nstatic void SW_Intr_Handler(void *baseaddr_p);\r\nstatic void TMR_Intr_Handler(void *baseaddr_p);\r\nstatic int InterruptSystemSetup(XScuGic *XScuGicInstancePtr);\r\nstatic int IntcInitFunction(u16 DeviceId, XTmrCtr *TmrInstancePtr, XGpio *GpioInstancePtr);\r\n\r\n//----------------------------------------------------\r\n// INTERRUPT HANDLER FUNCTIONS\r\n// - called by the timer, button interrupt, performs\r\n// - LED flashing\r\n//----------------------------------------------------\r\nint sw1_latched = 0;\r\nint sw2_latched = 0;\r\nint sw3_latched = 0;\r\nint sw4_latched;\r\nint Switch_1;\r\nint Switch_2;\r\nint Switch_3;\r\nint Switch_4;\r\n\r\nint tmr_cnt;\r\nint n = 1;\r\n\r\nvoid SW_Intr_Handler(void *InstancePtr)\r\n{\r\n\t// Disable GPIO interrupts\r\n\tXGpio_InterruptDisable(&SWInst, SW_INT);\r\n\t// Ignore additional button presses\r\n\tif ((XGpio_InterruptGetStatus(&SWInst) & SW_INT) !=\r\n\t\t\tSW_INT) {\r\n\t\t\treturn;\r\n\t\t}\r\n\tsw_value = XGpio_DiscreteRead(&SWInst, 1);\r\n\r\n\t Switch_1 = sw_value & 0x1;\r\n\t// Switch_2 = sw_value & 0x2;\r\n\t// Switch_3 = sw_value & 0x4;\r\n\t Switch_4 = sw_value & 0x8;\r\n\r\n\t//Code Switch 1\r\n\r\n\tif (Switch_1 == 1 && sw1_latched == 0){\r\n\t\tsw1_latched = 1;\r\n\t\tXTmrCtr_Stop(&TMRInst,0);\r\n\t\txil_printf(\"Timer Paused \\n\");\r\n\t}\r\n\r\n\telse if(Switch_1 == 0 && sw1_latched == 1){\r\n\t\tsw1_latched = 0;\r\n\r\n\t\tXTmrCtr_Start(&TMRInst,0);\t//start timer\r\n\t\txil_printf(\"Timer Resumed \\n\");\r\n\t}\r\n\r\n\t//Code Switch 2\r\n\t\t//Coder within the TMR_Intr_Handler because that's where the counting occurs\r\n\r\n\t//Code Switch 3\r\n\tSwitch_3 = sw_value & 0x4;\r\n\tif (Switch_3 == 4 && sw3_latched == 0){\r\n\t\tsw3_latched = 1;\r\n\t\t XTmrCtr_SetResetValue(&TMRInst, 0, TMR_LOAD_2);\r\n\t\txil_printf(\"Timer Loaded Value: %d \\n\", TMR_LOAD_2);\r\n\t}\r\n\telse if(Switch_3 == 0 && sw3_latched == 
1){\r\n\t\tsw3_latched = 0;\r\n\t\t XTmrCtr_SetResetValue(&TMRInst, 0, TMR_LOAD);\r\n\t\txil_printf(\"Timer Loaded Default Value: %d \\n\", TMR_LOAD);\r\n\t}\r\n\r\n\t//Code Switch 4\r\n\t\t//Moved to TMR_Intr_handler\r\n\r\n\t//xil_printf(\"Sw1: %d, Sw2: %d, Sw3: %d, Sw4: %d, Sw3_Latch = %d \\n\", Switch_1, Switch_2, Switch_3, Switch_4, sw3_latched);\r\n\r\n    //XGpio_DiscreteWrite(&LEDInst, 1, led_data);\r\n\r\n    (void)XGpio_InterruptClear(&SWInst, SW_INT);\r\n    // Enable GPIO interrupts\r\n    XGpio_InterruptEnable(&SWInst, SW_INT);\r\n}\r\n\r\n\r\n\r\nvoid BTN_Intr_Handler(void *InstancePtr)\r\n{\r\n\t// Disable GPIO interrupts\r\n\tXGpio_InterruptDisable(&BTNInst, BTN_INT);\r\n\t// Ignore additional button presses\r\n\tif ((XGpio_InterruptGetStatus(&BTNInst) & BTN_INT) !=\r\n\t\t\tBTN_INT) {\r\n\t\t\treturn;\r\n\t\t}\r\n\tbtn_value = XGpio_DiscreteRead(&BTNInst, 1);\r\n\r\n\t// Handle each push button\r\n\tswitch(btn_value){\r\n\t\tcase 1 :\r\n\t\t\tXTmrCtr_Reset(&TMRInst,0);\t//reset timer with right button\r\n\t\t\txil_printf(\"Timer Reset\\n\");\r\n\t\t\tbreak;\r\n\t\tcase 2 :\r\n\t\t\tXGpio_DiscreteWrite(&LEDInst, 1, 0);\t//reset LEDs\r\n\t\t\txil_printf(\"LED's Reset\\n\");\r\n\t\t\tbreak;\r\n\t\tcase 4 :\r\n\t\t\tXTmrCtr_Start(&TMRInst,0);\t//start timer\r\n\t\t\txil_printf(\"Timer Started\\n\");\r\n\t\t\tbreak;\r\n\t\tcase 8 :\r\n\t\t\tXTmrCtr_Stop(&TMRInst,0);\t//stop timer\r\n\t\t\txil_printf(\"Timer Stopped \\n\");\r\n\t\t\tbreak;\r\n\t\tdefault :\r\n\r\n\t\t\tbreak;\r\n\t}\r\n\r\n\tint value = XTmrCtr_GetValue(&TMRInst,0);\r\n\t//TODO: Add a feature to determine if the timer is STOPPED (get value = previous get value)\r\n\r\n\t//xil_printf(\"Get Value: %d\\n\", value);\r\n\r\n    //XGpio_DiscreteWrite(&LEDInst, 1, led_data);\r\n\r\n    (void)XGpio_InterruptClear(&BTNInst, BTN_INT);\r\n    // Enable GPIO interrupts\r\n    XGpio_InterruptEnable(&BTNInst, BTN_INT);\r\n}\r\n\r\nvoid TMR_Intr_Handler(void *data)\r\n{\r\n\tif (XTmrCtr_IsExpired(&TMRInst,0)){\r\n\t\tsw_value = XGpio_DiscreteRead(&SWInst, 1);\r\n\r\n\t\tif (Switch_4 == 8 && sw4_latched == 0){\r\n\t\t\tsw4_latched = 1;\r\n\t\t\ttmr_cnt = TMR_CNT_1;\r\n\t\t\txil_printf(\"tmr_count Max value set to %d \\n\", tmr_cnt);\r\n\t\t}\r\n\t\telse if(Switch_4 == 0 && sw4_latched == 1){\r\n\t\t\tsw4_latched = 0;\r\n\t\t\ttmr_cnt = TMR_CNT_2;\r\n\t\t\txil_printf(\"tmr_count Max value set to %d \\n\", tmr_cnt);\r\n\t\t}\r\n\r\n\r\n\r\n\t\t// Once timer has expired 3 times, stop, increment counter\r\n\t\t// reset timer and start running again\r\n\t\tif(tmr_count == tmr_cnt){\r\n\t\t\tXTmrCtr_Stop(&TMRInst,0);\r\n\t\t\ttmr_count = 0;\r\n\r\n\t\t\t//if DIP Switch 2 is active, cause the LED up count to be a down count\r\n\r\n\t\t\tSwitch_2 = sw_value & 0x2;\r\n\t\t\tif (Switch_2 == 2 && sw2_latched == 0){\r\n\t\t\t\tsw2_latched = 1;\r\n\t\t\t\tn = -1;\r\n\t\t\t\txil_printf(\"Counting Down\\n\");\r\n\t\t\t} else if(Switch_2 == 0 && sw2_latched == 1) {\r\n\t\t\t\tsw2_latched = 0;\t//clear the latch so the direction can be toggled again\r\n\t\t\t\tn = 1;\r\n\t\t\t\txil_printf(\"Counting Up \\n\");\r\n\t\t\t}\r\n\r\n\t\t\tled_data = led_data + n;\r\n\r\n\t\t\t//reset LED's with button value\r\n\t\t\tbtn_value = XGpio_DiscreteRead(&BTNInst, 1);\r\n\t\t\tif (btn_value == 2){\r\n\t\t\t\tled_data = 0;\r\n\t\t\t\txil_printf(\"led_data reset\\n\");\r\n\t\t\t}\r\n\t\t\txil_printf(\"Count: %d\\n\", led_data);\r\n\t\t\tXGpio_DiscreteWrite(&LEDInst, 1, led_data);\r\n\t\t\tXTmrCtr_Reset(&TMRInst,0);\r\n\t\t\tXTmrCtr_Start(&TMRInst,0);\r\n\r\n\r\n\t\t}\r\n\t\telse tmr_count++;\r\n\t\t//xil_printf(\"tmr_count = 
%d\\n\", tmr_count);\r\n\r\n\t}\r\n}\r\n\r\n\r\n\r\n//----------------------------------------------------\r\n// MAIN FUNCTION\r\n//----------------------------------------------------\r\nint main (void)\r\n{\r\n int status;\r\n //----------------------------------------------------\r\n // INITIALIZE THE PERIPHERALS & SET DIRECTIONS OF GPIO\r\n //----------------------------------------------------\r\n // Initialise LEDs\r\n status = XGpio_Initialize(&LEDInst, LEDS_DEVICE_ID);\r\n if(status != XST_SUCCESS) return XST_FAILURE;\r\n // Initialise Push Buttons\r\n status = XGpio_Initialize(&BTNInst, BTNS_DEVICE_ID);\r\n if(status != XST_SUCCESS) return XST_FAILURE;\r\n // Initialize Switches\r\n status = XGpio_Initialize(&SWInst, SWITCH_DEVICE_ID);\r\n if(status != XST_SUCCESS) return XST_FAILURE;\r\n\r\n // Set LEDs direction to outputs\r\n XGpio_SetDataDirection(&LEDInst, 1, 0x00);\r\n // Set all buttons direction to inputs\r\n XGpio_SetDataDirection(&BTNInst, 1, 0xFF);\r\n // SET ALL SWITCHES DIRECTIONS TO INPUTS\r\n XGpio_SetDataDirection(&SWInst, 1, 0xFF);\r\n\r\n\r\n\r\n\r\n\r\n //----------------------------------------------------\r\n // SETUP THE TIMER\r\n //----------------------------------------------------\r\n status = XTmrCtr_Initialize(&TMRInst, TMR_DEVICE_ID);\r\n if(status != XST_SUCCESS) return XST_FAILURE;\r\n XTmrCtr_SetHandler(&TMRInst, TMR_Intr_Handler, &TMRInst);\r\n XTmrCtr_SetResetValue(&TMRInst, 0, TMR_LOAD);\r\n XTmrCtr_SetOptions(&TMRInst, 0, XTC_INT_MODE_OPTION | XTC_AUTO_RELOAD_OPTION);\r\n\r\n\r\n // Initialize interrupt controller\r\n status = IntcInitFunction(INTC_DEVICE_ID, &TMRInst, &BTNInst);\r\n if(status != XST_SUCCESS) return XST_FAILURE;\r\n\r\n // Initialize interrupt controller\r\n status = IntcInitFunction(INTC_DEVICE_ID, &TMRInst, &SWInst);\r\n if(status != XST_SUCCESS) return XST_FAILURE;\r\n\r\n XTmrCtr_Start(&TMRInst, 0);\r\n\r\n while(1);\r\n\r\n return 0;\r\n}\r\n\r\n//----------------------------------------------------\r\n// INITIAL SETUP FUNCTIONS\r\n//----------------------------------------------------\r\n\r\nint InterruptSystemSetup(XScuGic *XScuGicInstancePtr)\r\n{\r\n\t// Enable interrupt - Buttons\r\n\tXGpio_InterruptEnable(&BTNInst, BTN_INT);\r\n\tXGpio_InterruptGlobalEnable(&BTNInst);\r\n\r\n\t// Enable interrupt - Switches\r\n\tXGpio_InterruptEnable(&SWInst, SW_INT);\r\n\tXGpio_InterruptGlobalEnable(&SWInst);\r\n\r\n\tXil_ExceptionRegisterHandler(XIL_EXCEPTION_ID_INT,\r\n\t\t\t \t \t \t \t \t (Xil_ExceptionHandler)XScuGic_InterruptHandler,\r\n\t\t\t \t \t \t \t \t XScuGicInstancePtr);\r\n\tXil_ExceptionEnable();\r\n\r\n\r\n\treturn XST_SUCCESS;\r\n\r\n}\r\n\r\nint IntcInitFunction(u16 DeviceId, XTmrCtr *TmrInstancePtr, XGpio *GpioInstancePtr)\r\n{\r\n\tXScuGic_Config *IntcConfig;\r\n\tint status;\r\n\r\n\t// Interrupt controller initialisation\r\n\tIntcConfig = XScuGic_LookupConfig(DeviceId);\r\n\tstatus = XScuGic_CfgInitialize(&INTCInst, IntcConfig, IntcConfig->CpuBaseAddress);\r\n\tif(status != XST_SUCCESS) return XST_FAILURE;\r\n\r\n\t// Call to interrupt setup\r\n\tstatus = InterruptSystemSetup(&INTCInst);\r\n\tif(status != XST_SUCCESS) return XST_FAILURE;\r\n\r\n\t// Connect GPIO interrupt to handler\r\n\tstatus = XScuGic_Connect(&INTCInst,\r\n\t\t\t\t\t \t \t INTC_GPIO_INTERRUPT_ID_1,\r\n\t\t\t\t\t \t \t (Xil_ExceptionHandler)BTN_Intr_Handler,\r\n\t\t\t\t\t \t \t (void *)GpioInstancePtr);\r\n\tif(status != XST_SUCCESS) return XST_FAILURE;\r\n\r\n\tstatus = XScuGic_Connect(&INTCInst,\r\n\t\t\t\t\t \t \t 
INTC_GPIO_INTERRUPT_ID_2,\r\n\t\t\t\t\t \t \t (Xil_ExceptionHandler)SW_Intr_Handler,\r\n\t\t\t\t\t \t \t (void *)GpioInstancePtr);\r\n\tif(status != XST_SUCCESS) return XST_FAILURE;\r\n\r\n\t// Connect timer interrupt to handler\r\n\tstatus = XScuGic_Connect(&INTCInst,\r\n\t\t\t\t\t\t\t INTC_TMR_INTERRUPT_ID,\r\n\t\t\t\t\t\t\t (Xil_ExceptionHandler)TMR_Intr_Handler,\r\n\t\t\t\t\t\t\t (void *)TmrInstancePtr);\r\n\tif(status != XST_SUCCESS) return XST_FAILURE;\r\n\r\n\t// Enable GPIO interrupts\r\n\tXGpio_InterruptEnable(GpioInstancePtr, 1);\r\n\tXGpio_InterruptGlobalEnable(GpioInstancePtr);\r\n\r\n\t// Enable GPIO and timer interrupts in the controller\r\n\tXScuGic_Enable(&INTCInst, INTC_GPIO_INTERRUPT_ID_1);\r\n\r\n\t// Enable GPIO and timer interrupts in the controller\r\n\tXScuGic_Enable(&INTCInst, INTC_GPIO_INTERRUPT_ID_2);\r\n\r\n\r\n\tXScuGic_Enable(&INTCInst, INTC_TMR_INTERRUPT_ID);\r\n\r\n\r\n\treturn XST_SUCCESS;\r\n}\r\n" }, { "alpha_fraction": 0.3349180221557617, "alphanum_fraction": 0.34898611903190613, "avg_line_length": 37.498085021972656, "blob_id": "1c7e0429ee84c9ddfee64b943029c30bb53f14ab", "content_id": "f46383ceeb438995205bc86e89dee233445204e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 10307, "license_type": "no_license", "max_line_length": 137, "num_lines": 261, "path": "/Zybo_MiniProjects/MP2/Source_Files/main.c", "repo_name": "Space-Odie/Zybo_Projects", "src_encoding": "UTF-8", "text": "/*###############################################################################################################\r\n# Created by Ian O'Donnell\r\n# Date: 4/10/21\r\n#################################################################################################################\r\n# Purpose: \r\n#\tSynthesizable software application to implement a simple image processing algorithm\r\n#\r\n#\tInput: Image Array (Found in image.h)\r\n#\tOutput: modified Image Array after it goes through an image processing algorithm. \r\n###############################################################################################################*/\r\n\r\n//Includes\r\n#include <stdio.h>\r\n#include <math.h>\r\n#include \"image.h\"\r\n\r\n//Declare Global Variables that will be used to loop through the images\r\n//Row / Col can be modified here to process an image of a different size. 
\r\nint row = 25;\t\t\t\t\t\t\t\t\t\r\nint col = 25;\r\nint i, j;\r\n\r\n/*###############################################################################################################\r\n#\tPurpose: Converts all pixels to either intensity 0 or 1 based on the threshold value\r\n#\tInput: Threshold value (decimal from 0-1)\r\n#\tGlobal Variable Used: img matrix\r\n#\tOutput: Binary Image matrix\r\n###############################################################################################################*/\r\nvoid I2B(float level){ \r\n    int threshold = 256*level; //calculate threshold\r\n    ROW: for(i = 0; i < row; i++){\r\n        COL: for(j = 0; j < col; j++){\r\n            if (img[i][j] < threshold){\r\n                b[i][j] = 0;\r\n            } else {\r\n                b[i][j] = 1;\r\n            }\r\n        }\r\n    }\r\n}\r\n\r\n/*###############################################################################################################\r\n#\tPurpose: Pads an image with all zeros around the border\r\n#\tGlobal Variable Used: img matrix\r\n#\tOutput: Zero Padded matrix (Size 27 x 27)\r\n###############################################################################################################*/\r\nvoid ZeroPad(){\r\n    //TODO - First zero pad the image\r\n    int r = row + 2; \r\n    int c = col + 2; \r\n    ROW: for(i = 0; i < r; i++) { \r\n        COLUMN: for(j = 0; j < c; j++){\r\n            if (i == 0){\r\n                zeropad[i][j] = 0;\r\n            }\r\n            else if (i == r-1){\r\n                zeropad[i][j] = 0;\r\n            }\r\n            else if (j == 0){\r\n                zeropad[i][j] = 0;\r\n            }\r\n            else if (j == c-1){\r\n                zeropad[i][j] = 0;\r\n            } else {\r\n                zeropad[i][j] = img[i-1][j-1]; \r\n            }\r\n        }\r\n    }\r\n}\r\n\r\n/*###############################################################################################################\r\n#\tPurpose: Assigns a new value to each pixel based on the average values of the neighborhood \r\n#\tGlobal Variable Used: zeropad image matrix\r\n#\tOutput: Average Image Array\r\n###############################################################################################################*/\r\nvoid Avg(){\r\n    //iterate over the original 25 x 25 image only; m,n index the zero-padded copy,\r\n    //so the 3 x 3 neighborhood never reads outside the 27 x 27 zeropad matrix\r\n    ROW: for(i = 0; i < row; i++) { \r\n        COLUMN: for(j = 0; j < col; j++){\r\n\r\n            int m = i + 1;\r\n            int n = j + 1;\r\n            avg[i][j] = (zeropad[m-1][n] + zeropad[m+1][n] + zeropad[m][n] \r\n                        + zeropad[m-1][n-1] + zeropad[m+1][n-1] + zeropad[m][n-1]\r\n                        + zeropad[m-1][n+1] + zeropad[m+1][n+1] + zeropad[m][n+1]) / 9; \r\n        }\r\n    }\r\n}\r\n/*###############################################################################################################\r\n#\tPurpose: Inverts the pixel intensity values \r\n#\tGlobal Variable Used: img matrix\r\n#\tOutput: Inverted image array\r\n###############################################################################################################*/\r\nvoid Inv(){\r\n    ROW: for(i = 0; i < row; i++){\r\n        COL: for(j = 0; j < col; j++){\r\n            inv[i][j] = 255 - img[i][j]; \r\n        }\r\n    }\r\n}\r\n\r\n/*###############################################################################################################\r\n#\tPurpose: Scales each pixel in an array by a constant factor\r\n#\tGlobal Variable Used: img matrix\r\n#\tInput: scaling factor (Currently only scaled by int values)\r\n#\tOutput: Scaled image array\r\n###############################################################################################################*/\r\nvoid Scale(float w){\r\n    ROW: for(i = 0; i < row; i++){\r\n        COL: for(j = 0; j < col; j++){\r\n            sca[i][j] = img[i][j] * w; \r\n        }\r\n    }\r\n} 
\r\n\r\n/*###############################################################################################################\r\n#\tPurpose: Compress Image using a simple run-length compression algorithm\r\n#\tGlobal Variable Used: img matrix\r\n#\tOutput: compressed image array (625 x 2 ) = max length (value x count of occurrences)\r\n###############################################################################################################*/\r\nint Compression(){\r\n    int num = 1 ;\r\n    int count = 0;\r\n    ROW: for(i = 0; i < row; i++){\r\n        COL: for(j = 0; j < col; j++){\r\n            if (j == (col-1) && i < (row-1) && (img[i][j] == img[i+1][0])){ //if position is on last col of a row (except the final row) - check first col of next row\r\n                num = num + 1; // add one to num\r\n            } \r\n            else if (j < (col-1) && img[i][j] == img[i][j+1]){ //if position is anywhere else than last col, check next col. \r\n                num = num + 1;\r\n            }\r\n            else{ //if the next item is not the same (or this is the final pixel), store the run into the array\r\n                comp_img[count][0] = img[i][j]; //store the intensity value into position 0\r\n                comp_img[count][1] = num; //store the count into position 1\r\n                count = count + 1; //increase array position \r\n                num = 1; //reset num counter\r\n            }\r\n        }\r\n    }\r\n    return count;\r\n}\r\n\r\n/*###############################################################################################################\r\n#\tPurpose: Decompress the previous Compressed Image using a simple decompression algorithm\r\n#\tGlobal Variable Used: comp_img matrix, img matrix\r\n#\tOutput: decompressed matrix (25 x 25)\r\n###############################################################################################################*/\r\nvoid Decompression(int d_col){\r\n\r\n//Loop through the decompressed image \r\n    int im_row = 0;\r\n    int im_col = 0;\r\n    int value, count;\r\n    for(int p = 0; p < d_col; p++){ \t\t//Go through each run in the compressed image array\r\n        value = comp_img[p][0]; \t\t//store the value\r\n        count = comp_img[p][1]; \t\t//store the count of the values\r\n        for (i = 0; i < count; i++){ \t\t//Assign the Value - Count - Amount of times while updating the position. 
\r\n            decom_img[im_row][im_col] = value; \t//Assign Value \r\n            \r\n            //Move to Next Position\r\n            if (im_col == (col - 1)){ \t\t//if at end of row\r\n                im_row = im_row + 1; \t\t//Move to next row\r\n                im_col = 0; \t\t//move back to col 0\r\n            } else { \t\t//if not at end of row\r\n                im_col = im_col + 1; \t\t//update position to move to the next position\r\n            }\r\n        }\r\n    }\r\n}\r\n\r\n/*###############################################################################################################\r\n#\tPurpose: Prints out all matrices to the terminal\r\n###############################################################################################################*/\r\nvoid PrintOut(int count) //For Verifying Design is being manipulated (testing)\r\n{\r\n    printf(\"Image: \\n\");\r\n    ROW1: for(i = 0; i < row; i++){\r\n        COL1: for(j = 0; j < col; j++){\r\n            printf(\"%d, \", img[i][j]);\r\n        }\r\n        printf(\"\\n\");\r\n    } \r\n\r\n    printf(\"Binary \\n\");\r\n    ROW2: for(i = 0; i < row; i++){\r\n        COL2: for(j = 0; j < col; j++){\r\n            printf(\"%d, \", b[i][j]);\r\n        }\r\n        printf(\"\\n\");\r\n    }\r\n\r\n    printf(\"Zero Pad \\n\");\r\n    ROW3: for(i = 0; i < row + 2; i++){\r\n        COL3: for(j = 0; j < col + 2; j++){\r\n            printf(\"%d, \", zeropad[i][j]);\r\n        }\r\n        printf(\"\\n\");\r\n    }\r\n\r\n    printf(\"Average \\n\");\r\n    ROW4: for(i = 0; i < row; i++){\r\n        COL4: for(j = 0; j < col; j++){\r\n            printf(\"%d, \", avg[i][j]);\r\n        }\r\n        printf(\"\\n\");\r\n    }\r\n\r\n    printf(\"Inversion \\n\");\r\n    ROW5: for(i = 0; i < row; i++){\r\n        COL5: for(j = 0; j < col; j++){\r\n            printf(\"%d, \", inv[i][j]);\r\n        }\r\n        printf(\"\\n\");\r\n    }\r\n\r\n    printf(\"Scaling .5 \\n\");\r\n    ROW6: for(i = 0; i < row; i++){\r\n        COL6: for(j = 0; j < col; j++){\r\n            printf(\"%d, \", sca[i][j]);\r\n        }\r\n        printf(\"\\n\");\r\n    }\r\n\t\r\n\tprintf(\"Compressed Image\\n\");\r\n    for (int n = 0; n < count; n++){\r\n        printf(\"[%d, %d],\", comp_img[n][0], comp_img[n][1]);\r\n        if (n % 25 == 0){\r\n            printf(\"\\n\");\r\n        }\r\n\t}\r\n\tprintf(\"This File got compressed to %d lines \\n\", count);\r\n\t\r\n    printf(\"decompressed \\n\");\r\n    ROW7: for(i = 0; i < row; i++){\r\n        COL7: for(j = 0; j < col; j++){\r\n            printf(\"%d, \", decom_img[i][j]);\r\n        }\r\n        printf(\"\\n\");\r\n    }\r\n}\r\n\r\n\r\n\r\n\r\n\r\n\r\n/*###############################################################################################################\r\n#\tMain Function to call sub functions\r\n###############################################################################################################*/\r\nint main() \r\n{ \r\n    I2B(.5);\t\t\t\t\t\t//Convert to Binary\r\n    ZeroPad();\t\t\t\t\t\t//Zero Pad Image\t\r\n    Avg();\t\t\t\t\t\t\t//Take Average of Image\r\n    Inv();\t\t\t\t\t\t\t//Invert Image\r\n    Scale(.5);\t\t\t\t\t\t//Scale Image\r\n    int count = Compression();\t\t//Compress Image\r\n    Decompression(count);\t\t\t//Decompress Image\r\n    PrintOut(count);\t\t\t\t//print all images to terminal\r\n}" }, { "alpha_fraction": 0.4690987169742584, "alphanum_fraction": 0.5, "avg_line_length": 29.890411376953125, "blob_id": "7a6c8926a454c4f22a6b291cd4fb9c1dda629c8b", "content_id": "cccb0108dc050e3b540c6be2361ca7081466efd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2330, "license_type": "no_license", "max_line_length": 98, "num_lines": 73, "path": "/Zybo_MiniProjects/Labs_SourceCode/Lab4.c", "repo_name": "Space-Odie/Zybo_Projects", "src_encoding": "UTF-8", "text": "/*----------------------------------------------------------\r\n 520 L Lab 4 Spring 
2020\r\n -----------------------------------------------------------\r\n File Name: tbSPI.v\r\n Author: Ian O'Donnell\r\n -----------------------------------------------------------\r\n Version Date Description\r\n 1.0 2-17-2020 Initial Release\r\n -----------------------------------------------------------\r\n Purpose: \r\n Simulates a common synchronous serial communication protocol \r\n called Serial Peripheral Interface for a slave core module.\r\n Full Duplex Mode is used\r\n ----------------------------------------------------------*/\r\n \r\n`timescale 1ns/1ns\r\nmodule tb_SPI(); \r\n \r\n //PARAMETERS\r\n parameter ADDR_WIDTH = 7; //Register Width\r\n parameter DATA_WIDTH = 16; //data Width\r\n\r\n //Local Parameters \r\n localparam CLOCK_CYCLE = 10;\r\n localparam SLOCK_CYCLE = 50;\r\n localparam MOSI_DELAY = 100; // One Duty Cycle of SCLOCK\r\n\r\n //INPUTS\r\n reg CLK; //Clock Input (10Mhz)\r\n reg SCLK; //Synchronous Clock Input (50Mhz)\r\n reg SSN; //Slave Select: active Low - Transmit Bit;\r\n reg MOSI; //Master Output/ Serial Input: (One Bit at a time) \r\n reg [DATA_WIDTH-1:0] RDATA; //Read Data Input (Sent to MISO)\r\n\r\n //OUTPUTS\r\n wire [ADDR_WIDTH-1:0] ADDR; //Address Output (From MISO)\r\n wire [DATA_WIDTH-1:0] WDATA; //Data Output (From MISO) \r\n wire WSTROBE; //Write Strobe (From MISO)\r\n wire MISO; //Master Input/Slave Output: one bit Serial Output (From RDATA)\r\n \r\n\r\n //declare i for for loop\r\n integer i;\r\n\r\n SPI UUT(SCLK, CLK, SSN, MOSI, RDATA, MISO, WSTROBE, WDATA, ADDR); \r\n\r\n //Create Clock Generator\r\n initial begin\r\n CLK = 0;\r\n forever #CLOCK_CYCLE CLK = ~CLK;\r\n end\r\n\r\n //Create SClock Generator\r\n initial begin\r\n SCLK = 0;\r\n forever #SLOCK_CYCLE if (!SSN) SCLK = ~SCLK;\r\n end\r\n\r\n initial \r\n begin \r\n\r\n //Initialize Inputs (ground floating values)\r\n SSN = 1; MOSI = 0;\r\n RDATA = 16'b1100110011001100; //set read data to a 16 bit value\r\n #200 SSN = 0; //Transmit On\r\n\r\n for(i = 0; i<24; i = i + 1) begin //test each operation\r\n #MOSI_DELAY MOSI = ~MOSI; \r\n end\r\n \r\n SSN = 1;\r\n end \r\nendmodule\r\n\r\n" }, { "alpha_fraction": 0.3195020854473114, "alphanum_fraction": 0.3300641179084778, "avg_line_length": 41.426231384277344, "blob_id": "9965368414c89b2c3897bb6a8a25dd33a8c8893d", "content_id": "6b85ce6ebc78f97aa4f458528028068f022d3b35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2651, "license_type": "no_license", "max_line_length": 113, "num_lines": 61, "path": "/Zybo_MiniProjects/MP2/Source_Files/Python Code/Convert.py", "repo_name": "Space-Odie/Zybo_Projects", "src_encoding": "UTF-8", "text": "#################################################################################################################\r\n# Created by Ian O'Donnell\r\n# Date: 4/10/21\r\n#################################################################################################################\r\n# Purpose: \r\n# Convert arrays to be useable in a different scripting language\r\n# The inputs are taken from the terminal output and place in the Files.py \r\n#################################################################################################################\r\n\r\n#################################################################################################################\r\n# Input: Python Format Array\r\n# Output: Matlab format 
Array\r\n#################################################################################################################\r\ndef c2m(c, title):\r\n print(f'Converting: \\n{title} Image')\r\n temp = []\r\n for i, num in enumerate(c):\r\n length = 25\r\n if (i+1) % length == 0 and i != 0:\r\n temp.append(str(c[i]) + ';')\r\n print(str(temp).replace(\"'\",\"\").replace(\";,\",\";\").replace(\"[\",\"\").replace(\"]\",\"\"))\r\n temp = []\r\n else: \r\n temp.append(str(c[i]))\r\n \r\n print(\"Convert Complete \\n\")\r\n\r\n#Used for the Compressed Image 2 Matlab\r\ndef c2m2(c, title):\r\n print(f'Converting: \\n{title} Image')\r\n temp = []\r\n for i, num in enumerate(c):\r\n length = 25\r\n if (i+1) % 1 == 0:\r\n temp.append(str(c[i]) + ';')\r\n print(str(temp).replace(\"'\",\"\").replace(\";,\",\";\").replace(\"[\",\"\").replace(\"]\",\"\"))\r\n temp = []\r\n else: \r\n temp.append(str(c[i]))\r\n \r\n print(\"Convert Complete \\n\")\r\n\r\n \r\n#################################################################################################################\r\n# Convert 625x2 matrix to a form I can use in C\r\n# I need this so I can decompress the output of the compressed image\r\n#################################################################################################################\r\n# Input: Python Format Array\r\n# Output: C format Array\r\n#################################################################################################################\r\ndef c2c(c, title):\r\n print(f'Converting: \\n{title} Array')\r\n temp = []\r\n for i, num in enumerate(c):\r\n if (i+1) % 2 == 0 and i != 0:\r\n temp.append(str(c[i]) + '},')\r\n else: \r\n temp.append('{' + str(c[i]))\r\n\r\n print(str(temp).replace(\"'\",\"\").replace(\",,\",\",\"))#.replace(\"[\",\"{\").replace(\"]\",\"}\"))\r\n print(\"Convert Complete \\n\")\r\n\r\n" }, { "alpha_fraction": 0.5302482843399048, "alphanum_fraction": 0.5483070015907288, "avg_line_length": 39.40187072753906, "blob_id": "262fab0f8410e4c2d0590c60e0582afd87185a71", "content_id": "9a12dfe71724f9bafabe55950417808527ed6870", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4430, "license_type": "no_license", "max_line_length": 141, "num_lines": 107, "path": "/Zybo_MiniProjects/Labs_SourceCode/Lab2.c", "repo_name": "Space-Odie/Zybo_Projects", "src_encoding": "UTF-8", "text": "/*----------------------------------------------------------\r\n 520 L Lab 2 Spring 2020\r\n -----------------------------------------------------------\r\n File Name: lab2.c\r\n Author: Ian O'Donnell\r\n -----------------------------------------------------------\r\n Version Date Description\r\n 1.0 3-5-2020 Initial Release\r\n 2.0 3-7-2020 Added LED's and Print Statements\r\n -----------------------------------------------------------\r\n Purpose:\r\n read the status of the push buttons as well as dip switches\r\n and writes the corresponding values to the UART terminal\r\n output.\r\n\r\n a. Output sum of the value of dip switches and push buttons\r\n b. Output difference of the value of dip switches and push buttons\r\n c. Output product of the value of dip switches and push buttons\r\n d. Output the remainder of the division of the value of dip switches by push buttons\r\n e. Output the ceiling of square root of value of dip switches (You can use two functions:\r\n ceil and sqrt provided by math.h header file. Search online to see how you can add\r\n math.h to your SDK project.\r\n f. 
Write a function to calculate x^y\r\n\r\n-----------------------------------------------------------*/\r\n/* Include Files */\r\n#include \"xparameters.h\"\r\n#include \"xgpio.h\"\r\n#include \"xstatus.h\"\r\n#include \"xil_printf.h\"\r\n#include \"stdio.h\"\r\n#include \"sleep.h\"\r\n#include <math.h>\r\n\r\n/* Definitions */\r\n#define GPIO_PB XPAR_GPIO_0_DEVICE_ID // GPIO 0\r\n#define GPIO_SW XPAR_GPIO_1_DEVICE_ID //GPIO 1\r\n#define DELAY1 500000 // Software delay length\r\n#define DELAY2 1000000 // Software delay length\r\n#define PB_CHANNEL 1 // GPIO port for PB\r\n#define SW_CHANNEL 1 // GPIO port for SWITCH\r\n#define LED_CHANNEL 2 // GPIO port for LED\r\n\r\nXGpio sw_led_4bits, btns_4bits; // GPIO Device driver instance (switch/button inputs)\r\n\r\nfloat Power(int x, int y)\r\n{\r\n    float pow = 1;\r\n    for (int i=1; i<=y; i++)\r\n    {\r\n        pow = pow * x;\r\n    }\r\n    return pow;\r\n}\r\n\r\nint main(void)\r\n{\r\n\r\n    u32 sw_state, pb_state;\r\n\r\n    //Initialization Switches (0XF = input, 0x0 = output)\r\n    XGpio_Initialize(&sw_led_4bits, GPIO_SW); //Initialization using function XGpio_Initialize for dip switches\r\n    XGpio_SetDataDirection(&sw_led_4bits, SW_CHANNEL, 0xF); //Setting data direction using function XGpio_SetDataDirection for dip switches\r\n    //Initialization PB\r\n    XGpio_Initialize(&btns_4bits, GPIO_PB);\r\n    XGpio_SetDataDirection(&btns_4bits, PB_CHANNEL, 0xF);\r\n    //initialize LED\r\n    XGpio_SetDataDirection(&sw_led_4bits, LED_CHANNEL, 0x0);\r\n\r\n    while(1)\r\n    {\r\n        //Constantly read the push button and dip switches values\r\n        sw_state = XGpio_DiscreteRead(&sw_led_4bits, SW_CHANNEL);\r\n        pb_state = XGpio_DiscreteRead(&btns_4bits, PB_CHANNEL);\r\n\r\n        //and write them to the output terminal using xil_printf function.\r\n        xil_printf(\"--------------------------------------------------\\n\");\r\n        xil_printf(\"Switch Status is: %d \\n\", sw_state);\r\n        xil_printf(\"Push Button Status is: %d \\n\", pb_state);\r\n\r\n        //The LEDs represent the corresponding values on dip switches.\r\n        XGpio_DiscreteWrite(&sw_led_4bits, LED_CHANNEL, sw_state);\r\n\r\n        //Gather results\r\n        int sum = sw_state + pb_state;\r\n        int difference = sw_state - pb_state;\r\n        int product = sw_state * pb_state;\r\n        int remainder = (pb_state != 0) ? (sw_state % pb_state) : 0; //guard against modulo by zero when no button is pressed\r\n        double root = sqrt(sw_state);\r\n        int round = ceil(root);\r\n\r\n        float power = Power(sw_state, pb_state); //I did this with int and float. int would not let me do 15^15 so I changed it to a float\r\n\r\n        //Also assign a delay between each pair of the write function\r\n        usleep(500000);\r\n\r\n        //Print Results\r\n        xil_printf(\"Switch + Button: %d\\n\",sum);\r\n        xil_printf(\"Switch - Button: %d\\n\",difference);\r\n        xil_printf(\"Switch * Button: %d\\n\",product);\r\n        xil_printf(\"Switch %% Button: %d\\n\",remainder);\r\n        xil_printf(\"Square Root of Switch: %d\\n\",round);\r\n        printf(\"Switches ^ Button: %.0f\\n\",power); //Use printf so float value can be printed\r\n        xil_printf(\"--------------------------------------------------\\n\");\r\n\r\n    }\r\n}\r\n" } ]
9
adinamitru/Bookflix
https://github.com/adinamitru/Bookflix
53eabd8ed8b8f1666eb58797d8e5af6ae35de249
6974d56571845044c6560880e588db505c2e2f7e
8c8a3e4f686051e136bc2d2534c15d21ae0ed9ca
refs/heads/master
2023-02-18T11:27:47.102467
2021-01-24T21:22:24
2021-01-24T21:22:24
246,121,780
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8837209343910217, "alphanum_fraction": 0.8837209343910217, "avg_line_length": 10, "blob_id": "ff3d93b8a6d9bb33c9bac7a162e0d5e277f6fb36", "content_id": "e6acdc644b6d04d80836dd6dbb0c019d7deacc2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 43, "license_type": "no_license", "max_line_length": 22, "num_lines": 4, "path": "/server/requirements.txt", "repo_name": "adinamitru/Bookflix", "src_encoding": "UTF-8", "text": "mysql\nmysql-connector-python\nflask\nrequests" }, { "alpha_fraction": 0.7573529481887817, "alphanum_fraction": 0.7720588445663452, "avg_line_length": 14.11111068725586, "blob_id": "8caa8b8dbe90f069e05d1f17ff5cd42415ee67f3", "content_id": "1f7acf88693d9a2ae63eabfa9dfd6db11160f4cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 136, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, "path": "/server/Dockerfile", "repo_name": "adinamitru/Bookflix", "src_encoding": "UTF-8", "text": "FROM python:3.6\n\nWORKDIR /app\n\nCOPY requirements.txt /app\nRUN pip install -r requirements.txt\n\nCOPY server.py /app\nCMD python server.py\n" }, { "alpha_fraction": 0.7593985199928284, "alphanum_fraction": 0.7744361162185669, "avg_line_length": 15.625, "blob_id": "d0dea1c07e36c082e683345835acdae6271890a9", "content_id": "ed8613210f3ba52624a5e764056d84638f785eb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 133, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/admin/Dockerfile", "repo_name": "adinamitru/Bookflix", "src_encoding": "UTF-8", "text": "FROM python:3.6\n\nWORKDIR /app\nCOPY requirements.txt /app\nRUN pip install -r requirements.txt\n\nCOPY admin.py /app\nCMD python admin.py\n" }, { "alpha_fraction": 0.8399999737739563, "alphanum_fraction": 0.8399999737739563, "avg_line_length": 24, "blob_id": "b5f19941d9f76a1fd39597260fc2bf2546c47065", "content_id": "7b8efc2285c7bda6a39760c96cbee3c7ba8bff44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 25, "license_type": "no_license", "max_line_length": 24, "num_lines": 1, "path": "/run-server.sh", "repo_name": "adinamitru/Bookflix", "src_encoding": "UTF-8", "text": "docker-compose up server\n" }, { "alpha_fraction": 0.8461538553237915, "alphanum_fraction": 0.8461538553237915, "avg_line_length": 25, "blob_id": "02011ed84c4c0d918f7a011c01bcca3e75d63629", "content_id": "8c8505befdf62d7cb2345f48c40d4a5466dcea87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 26, "license_type": "no_license", "max_line_length": 25, "num_lines": 1, "path": "/run-client.sh", "repo_name": "adinamitru/Bookflix", "src_encoding": "UTF-8", "text": "docker-compose run client\n" }, { "alpha_fraction": 0.5406532287597656, "alphanum_fraction": 0.5781792998313904, "avg_line_length": 17.662338256835938, "blob_id": "2cc66500dd7e1be5c83dd38982ba2ba7c3d63e27", "content_id": "1a7d8df4c83a876c7cf5ce04c07caa6c6521de76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 1439, "license_type": "no_license", "max_line_length": 57, "num_lines": 77, "path": "/docker-compose.yml", "repo_name": "adinamitru/Bookflix", "src_encoding": "UTF-8", "text": "version: \"3\"\nservices:\n db:\n image: mysql:8.0\n ports:\n - '3306:3306'\n environment:\n MYSQL_ROOT_PASSWORD: root\n 
volumes:\n      - ./db:/docker-entrypoint-initdb.d/:ro\n    deploy:\n      placement:\n        constraints: [node.role == manager]\n\n  server:\n    image: adinamitru/bookflix:latest\n    tty: true\n    ports:\n      - \"5000:5000\"\n    environment:\n      - HOST=db\n      - PORT=5000\n    depends_on:\n      - db\n      - auth\n    build: ./server\n\n  admin:\n    image: adinamitru/bookflix:admin\n    tty: true\n    build: ./admin\n    depends_on:\n      - server\n\n  auth:\n    image: adinamitru/bookflix:auth\n    tty: true\n    ports:\n      - \"6000:6000\"\n    environment:\n      - HOST=db\n      - PORT=6000\n    build: ./auth\n    depends_on:\n      - db\n\n  client:\n    image: adinamitru/bookflix:client\n    tty: true\n    build: ./client\n    stdin_open: true\n    depends_on:\n      - server\n\n  grafana:\n    image: grafana/grafana:latest\n    ports:\n      - '3000:3000'\n    volumes:\n      - ./grafana-provisioning/:/etc/grafana/provisioning\n    depends_on:\n      - db\n    environment:\n      - GF_SECURITY_ADMIN_USER=bookflix\n      - GF_SECURITY_ADMIN_PASSWORD=bookflix\n\n  visualizer:\n    image: dockersamples/visualizer:stable\n    ports:\n      - 8081:8080\n    stop_grace_period: 1m30s\n    volumes:\n      - \"/var/run/docker.sock:/var/run/docker.sock\"\n    deploy:\n      placement:\n        constraints: [ node.role == manager ]\n" }, { "alpha_fraction": 0.5041565299034119, "alphanum_fraction": 0.5094282031059265, "avg_line_length": 31.130292892456055, "blob_id": "7144985a424a37b6de294ab06592738f9b961ca6", "content_id": "54f491349fcab34835bd60c7c82eb663d87b3ad3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9864, "license_type": "no_license", "max_line_length": 106, "num_lines": 307, "path": "/server/server.py", "repo_name": "adinamitru/Bookflix", "src_encoding": "UTF-8", "text": "import json\n\nimport mysql.connector\nimport requests\nfrom mysql.connector import Error, cursor\nfrom flask import Flask, escape, request\nfrom werkzeug.utils import redirect\n\napp = Flask(__name__)\n\nurl_login = 'http://auth:6000/login'\nurl_createAcc = 'http://auth:6000/createAcc'\n\n\n\[email protected]('/book', methods=['POST'])\ndef add_book():\n    \"\"\" Connect to MySQL database \"\"\"\n    conn = None\n    try:\n        conn = mysql.connector.connect(host='db',\n                                       port='3306',\n                                       database='bookflix',\n                                       user='root',\n                                       password='root')\n        print(\"DA\")\n\n        title = request.values.get('title')\n        author_name = request.values.get('author_name')\n        publisher = request.values.get('publisher')\n        language = request.values.get('language')\n        genre = request.values.get('genre')\n        short_description = request.values.get('short_description')\n        publishing_year = request.values.get('publishing_year')\n        no_pages = request.values.get('no_pages')\n        no_readers = request.values.get('no_readers')\n        rate = request.values.get('rate')\n        awards = request.values.get('awards')\n\n        mySql_insert_query = \"\"\"INSERT INTO book (title, author_name, publisher, language, genre,\n                            short_description, publishing_year, no_pages, no_readers, rate, awards)\n                            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n        recordTuple = (title, author_name, publisher, language, genre, short_description, publishing_year,\n                       no_pages, no_readers, rate, awards)\n        cursor = conn.cursor()\n        cursor.execute(mySql_insert_query, recordTuple)\n        conn.commit()\n\n    except Error as e:\n        print(e)\n\n    finally:\n        if conn is not None and conn.is_connected():\n            conn.close()\n    return \"\"\n\n\n\[email protected]('/createAcc', methods=['POST'])\ndef create_acc():\n    name = request.values.get('name')\n    user_name = request.values.get('user_name')\n    password = request.values.get('password')\n    print('Server: create account')\n    response = requests.post(\n        url_createAcc,\n        data={\n            'name': name,\n            'user_name': user_name,\n            'password': password})\n    return \"\"\n\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n    user_name = request.values.get('user_name')\n    password = request.values.get('password')\n    print('Server: login')\n    response = requests.post(\n        url_login,\n        data={\n            'user_name': user_name,\n            'password': password})\n    print(response.json())\n    return json.dumps(response.text)\n\n\n\[email protected]('/book', methods=['DELETE'])\ndef delete_book():\n    \"\"\" Connect to MySQL database \"\"\"\n    conn = None\n    try:\n        conn = mysql.connector.connect(host='db',\n                                       port='3306',\n                                       database='bookflix',\n                                       user='root',\n                                       password='root')\n\n        book_id = request.values.get('book_id')\n        sql_select_booking_query = \"Select * from book where book_id = %s\"\n        cursor = conn.cursor()\n        cursor.execute(sql_select_booking_query, (book_id,))\n        exists = cursor.fetchone()\n        if exists is not None:\n            sql_Delete_query = \"Delete from book where book_id = %s\"\n            cursor = conn.cursor()\n            cursor.execute(sql_Delete_query, (book_id,))\n            conn.commit()\n\n    except Error as e:\n        print(e)\n\n    finally:\n        if conn is not None and conn.is_connected():\n            conn.close()\n    return \"\"\n\n\n\[email protected]('/book', methods=['GET'])\ndef list_books():\n    \"\"\" Connect to MySQL database \"\"\"\n    conn = None\n    try:\n        conn = mysql.connector.connect(host='db',\n                                       port='3306',\n                                       database='bookflix',\n                                       user='root',\n                                       password='root')\n\n        print(\"DA\")\n        sql_list_query = \"Select * from book\"\n        cursor = conn.cursor()\n        cursor.execute(sql_list_query)\n        record = cursor.fetchall()\n        print(\"DA\")\n        print(record)\n\n    except Error as e:\n        print(e)\n\n    finally:\n        if conn is not None and conn.is_connected():\n            conn.close()\n    return json.dumps(record)\n\n\n\[email protected]('/user', methods=['POST'])\ndef list_category():\n    \"\"\" Connect to MySQL database \"\"\"\n    category_no = 0\n    conn = None\n    try:\n        conn = mysql.connector.connect(host='db',\n                                       port='3306',\n                                       database='bookflix',\n                                       user='root',\n                                       password='root')\n\n        category = request.values.get('category')\n        cursor = conn.cursor()\n        if category == \"read\":\n            category_no = 1\n        elif category == \"started\":\n            category_no = 2\n        elif category == \"liked\":\n            category_no = 3\n        elif category == \"disliked\":\n            category_no = 4\n\n        print(category_no)\n        sql_list_query = \"Select title, author_name from user_client natural join book where list_id = %s\"\n        cursor.execute(sql_list_query, (category_no,))\n        record = cursor.fetchall()\n\n    except Error as e:\n        print(e)\n\n    finally:\n        if conn is not None and conn.is_connected():\n            conn.close()\n    return json.dumps(record)\n\n\n\[email protected]('/books', methods=['POST'])\ndef get_book():\n    \"\"\" Connect to MySQL database \"\"\"\n    conn = None\n    try:\n        conn = mysql.connector.connect(host='db',\n                                       port='3306',\n                                       database='bookflix',\n                                       user='root',\n                                       password='root')\n\n        category = request.values.get('category')\n        category_type = request.values.get('category_type')\n        sql_select_booking_query = \"\"\"Select * from book where genre = %s\"\"\"\n        cursor = conn.cursor()\n        cursor.execute(sql_select_booking_query, (category_type,))\n        record = cursor.fetchall()\n        conn.commit()\n\n    except Error as e:\n        print(e)\n\n    finally:\n        if conn is not None and conn.is_connected():\n            conn.close()\n    return json.dumps(record)\n\n\n\[email protected]('/read', methods=['POST'])\ndef read_book():\n    \"\"\" Connect to MySQL database \"\"\"\n    conn = None\n    try:\n        conn = 
mysql.connector.connect(host='db',\n port='3306',\n database='bookflix',\n user='root',\n password='root')\n\n read_book_name = request.values.get('read_book_name')\n liked = request.values.get('liked')\n sql_select_booking_query = \"\"\"Select * from book where title = %s\"\"\"\n cursor = conn.cursor()\n cursor.execute(sql_select_booking_query, (read_book_name,))\n record = cursor.fetchone()\n book_id = record[0]\n conn.commit()\n\n mySql_insert_read_query = \"\"\"INSERT INTO user_client (book_id, list_id)\n VALUES (%s, 1) \"\"\"\n cursor = conn.cursor()\n cursor.execute(mySql_insert_read_query, (book_id,))\n conn.commit()\n\n if liked == 'like':\n mySql_insert_liked_query = \"\"\"INSERT INTO user_client (book_id, list_id)\n VALUES (%s, 3) \"\"\"\n cursor = conn.cursor()\n cursor.execute(mySql_insert_liked_query, (book_id,))\n conn.commit()\n elif liked == 'dislike':\n mySql_insert_liked_query = \"\"\"INSERT INTO user_client (book_id, list_id)\n VALUES (%s, 4) \"\"\"\n cursor = conn.cursor()\n cursor.execute(mySql_insert_liked_query, (book_id,))\n conn.commit()\n\n except Error as e:\n print(e)\n\n finally:\n if conn is not None and conn.is_connected():\n conn.close()\n return \"\"\n\n\[email protected]('/start', methods=['POST'])\ndef start_book():\n \"\"\" Connect to MySQL database \"\"\"\n conn = None\n try:\n conn = mysql.connector.connect(host='db',\n port='3306',\n database='bookflix',\n user='root',\n password='root')\n\n stat_book_name = request.values.get('stat_book_name')\n sql_select_booking_query = \"\"\"Select * from book where title = %s\"\"\"\n cursor = conn.cursor()\n cursor.execute(sql_select_booking_query, (stat_book_name,))\n record = cursor.fetchone()\n book_id = record[0]\n genre = record[5]\n print(genre)\n conn.commit()\n\n mySql_insert_started_query = \"\"\"INSERT INTO user_client (book_id, list_id)\n VALUES (%s, 2) \"\"\"\n cursor = conn.cursor()\n cursor.execute(mySql_insert_started_query, (book_id,))\n conn.commit()\n\n sql_select_suggest_query = \"\"\"Select * from book where genre = %s and not(book_id=%s)\"\"\"\n cursor = conn.cursor()\n cursor.execute(sql_select_suggest_query, (genre, book_id,))\n record_suggest = cursor.fetchall()\n conn.commit()\n\n except Error as e:\n print(e)\n\n finally:\n if conn is not None and conn.is_connected():\n conn.close()\n return json.dumps(record_suggest)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7651515007019043, "avg_line_length": 15.375, "blob_id": "8ea3557853e63b0bfeb1b9c2e30909069aaaca80", "content_id": "5cf5cc82e827012d79fa8a9721927d829ff9a75e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 132, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/auth/Dockerfile", "repo_name": "adinamitru/Bookflix", "src_encoding": "UTF-8", "text": "FROM python:3.6\n\nWORKDIR /app\nCOPY requirements.txt /app\nRUN pip install -r requirements.txt\n\nCOPY auth.py /app\nCMD python auth.py\n\n" }, { "alpha_fraction": 0.5262210369110107, "alphanum_fraction": 0.5383749604225159, "avg_line_length": 30.735713958740234, "blob_id": "d18b65b88f13c987c0617ed7cf755b314445bc47", "content_id": "26ad187cec05050f0b8c8781cc831fe0e407cb1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4443, "license_type": "no_license", "max_line_length": 113, "num_lines": 140, "path": "/client/client.py", "repo_name": "adinamitru/Bookflix", 
"src_encoding": "UTF-8", "text": "# Mitru Adina\n# 343C1\nimport json\nimport sys\n\nimport requests\n\nurl = 'http://server:5000/book'\nurl_books = 'http://server:5000/books'\nurl_read = 'http://server:5000/read'\nurl_category = 'http://server:5000/user'\nurl_start = 'http://server:5000/start'\nurl_login = 'http://server:5000/login'\nurl_createAcc = 'http://server:5000/createAcc'\n\n\ndef list_books():\n response = requests.get(\n url)\n return response.text\n\n\ndef get_book(category, category_type):\n response = requests.post(\n url_books,\n data={\n 'category': category,\n 'category_type': category_type})\n\n return response.text\n\n\ndef read_book(read_book_name, liked):\n response = requests.post(\n url_read,\n data={\n 'read_book_name': read_book_name,\n 'liked': liked})\n\n\ndef list_category(category):\n response = requests.post(\n url_category,\n data={\n 'category': category})\n return response.text\n\n\ndef start_book(stat_book_name):\n response = requests.post(\n url_start,\n data={\n 'stat_book_name': stat_book_name})\n return response.text\n\n\ndef login(user_name, password):\n response = requests.post(\n url_login,\n data={\n 'user_name': user_name,\n 'password': password})\n return response.text\n\n\ndef create_account(name, user_name, password):\n response = requests.post(\n url_createAcc,\n data={'name': name,\n 'user_name': user_name,\n 'password': password})\n\n\ndef read_create_account_details():\n name = input(\"Name: \")\n user_name = input(\"User_name: \")\n password = (input(\"Password: \"))\n recordTuple = (name, user_name, password)\n\n return recordTuple\n\n\ndef operation_type():\n while True:\n print(\"Choose operation ('Create account' or 'Login'): \")\n line = sys.stdin.readline()\n ok = 5\n if line == \"Create account\\n\":\n recordTuple = read_create_account_details()\n create_account(recordTuple[0], recordTuple[1], recordTuple[2])\n print(\"User added successfully!\\n\" + \"Do you want you login?\")\n yes_no = input(\"Y/N : \")\n if yes_no == \"Y\":\n ok = 0\n if line == \"Login\\n\":\n user_name = input(\"user_name: \")\n password = input(\"password: \")\n record = json.loads(login(user_name, password))\n print(record)\n if record != \"\":\n ok = 0\n else:\n ok = 1\n if ok == 0:\n print(\"Choose operation ('Recommend a book = 1', 'Mark a book as read = 2', 'Start a new book = 3', \"\n \"'Continue reading = 4', 'List books = 5', 'List category \"\"= 6'): \")\n line = sys.stdin.readline()\n if line == \"1\\n\":\n category = input(\"Please provide the category: \")\n category_type = input(\"Please provide the \" + category + \" you would like: \")\n recommendations = json.loads(get_book(category, category_type))\n print(\"The recommendations: \", recommendations)\n if line == \"2\\n\":\n read_book_name = input(\"Please provide the name of the book: \")\n liked = input(\"Please say if you like or dislike: \")\n read_book(read_book_name, liked)\n print(\"The book \" + liked + \" has been marked as read and \" + liked)\n if line == \"3\\n\":\n stat_book_name = input(\"Please provide the name of the book: \")\n suggestion = start_book(stat_book_name)\n print(\"The book has been added to started list and you might also like: \" + suggestion)\n if line == \"4\\n\":\n list = json.loads(list_category(\"started\"))\n print(list)\n read_book_name = input(\"Which one would you like to continue reading: \")\n if line == \"5\\n\":\n list = json.loads(list_books())\n print(list)\n if line == \"6\\n\":\n category = input(\"Please provide the category: \")\n list = 
json.loads(list_category(category))\n print(list)\n elif ok == 1:\n print(\"Wrong password or username, please try again \")\n elif ok == 2:\n print(\"You do not have account, please create account \")\n\n\nif __name__ == '__main__':\n operation_type()\n" }, { "alpha_fraction": 0.7573529481887817, "alphanum_fraction": 0.7720588445663452, "avg_line_length": 15.875, "blob_id": "d82f2b6869544ac7cb787ff466cc96dc259775f9", "content_id": "ff8e9c9f8023c088493fb1c05993e31097ce65e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 136, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/client/Dockerfile", "repo_name": "adinamitru/Bookflix", "src_encoding": "UTF-8", "text": "FROM python:3.6\n\nWORKDIR /app\nCOPY requirements.txt /app\nRUN pip install -r requirements.txt\n\nCOPY client.py /app\nCMD python client.py\n\n" }, { "alpha_fraction": 0.6760299801826477, "alphanum_fraction": 0.699999988079071, "avg_line_length": 45.824562072753906, "blob_id": "cf6c590b88fea09879a3ed521db234ab7a99fece", "content_id": "e8976bc58cfd42703f452631a6521ce002f70bdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 2670, "license_type": "no_license", "max_line_length": 359, "num_lines": 57, "path": "/db/init.sql", "repo_name": "adinamitru/Bookflix", "src_encoding": "UTF-8", "text": "CREATE DATABASE bookflix;\nuse bookflix;\n\n\nCREATE TABLE book (\n book_id int NOT NULL AUTO_INCREMENT,\n title VARCHAR(100),\n author_name VARCHAR(20),\n publisher VARCHAR(20),\n language VARCHAR(20),\n genre VARCHAR(20),\n short_description TEXT,\n publishing_year INT(5),\n no_pages INT(5), \n no_readers INT(7),\n rate INT(2),\t\n awards INT(2) default 0,\n PRIMARY KEY (book_id)\n);\n\nCREATE TABLE user_client (\n book_id INT(6),\n list_id INT(6)\n);\n\nCREATE TABLE user_info (\n user_id INT NOT NULL AUTO_INCREMENT,\n name VARCHAR(20),\n user_name VARCHAR(20),\n password VARCHAR(20),\n PRIMARY KEY (user_id)\n);\n\n\n\nCREATE TABLE book_user (\n user_id INT UNSIGNED NOT NULL,\n book_id INT UNSIGNED NOT NULL,\n PRIMARY KEY (user_id, book_id),\n --CONSTRAINT `Constr_book_user_user_id_fk`\n FOREIGN KEY (user_id) REFERENCES user_info(user_id)\n ON DELETE CASCADE ON UPDATE CASCADE,\n FOREIGN KEY (book_id) REFERENCES book(book_id)\n ON DELETE CASCADE ON UPDATE CASCADE\n)\n\nINSERT INTO book (title, author_name, publisher, language, genre, short_description, publishing_year, no_pages, no_readers, rate, awards)\nVALUES ('The Fellowship of the Ring', 'J. R. R. Tolkien', 'Allen & Unwin', 'English', 'fantasy', 'The title of the novel refers to the storys main antagonist, the Dark Lord Sauron,[a] who had in an earlier age created the One Ring to rule the other Rings of Power as the ultimate weapon in his campaign to conquer and rule all of Middle-earth', 1954, 427, 552, 9.2, 5);\n\nINSERT INTO book (title, author_name, publisher, language, genre, short_description, publishing_year, no_pages, no_readers, rate, awards)\nVALUES ('c', 'Irving Stone', 'Allen & Unwin', 'English', 'biographical novel', 'It is Stones first major publication and is largely based on the collection of letters between Vincent van Gogh and his younger brother, art dealer Theo van Gogh. 
They lay the foundation for most of what is known about the thoughts and beliefs of the artist.', 1934, 576, 241, 8.7, 1);\n\nINSERT INTO book (title, author_name, publisher, language, genre, short_description, publishing_year, no_pages, no_readers, rate, awards)\nVALUES ('Harry Potter and the Goblet of Fire', 'J. K. Rowling', 'Scholastic', 'English', 'fantasy', 'It follows Harry Potter, a wizard in his fourth year at Hogwarts School of Witchcraft and Wizardry, and the mystery surrounding the entry of Harrys name into the Triwizard Tournament, in which he is forced to compete.', 2000, 636, 562, 8.9, 2);\n\nINSERT INTO user_info (name, user_name, password) VALUES ('Ana Popescu', 'ana', 'pass');\nINSERT INTO user_info (name, user_name, password) VALUES (Adina, adina, pass);\n\n" }, { "alpha_fraction": 0.5130681991577148, "alphanum_fraction": 0.5207386612892151, "avg_line_length": 28.090909957885742, "blob_id": "27c3ce220d67a06d50b3410a1d8e2cbec2644a84", "content_id": "9bd956d3da5ecbe385305467a41e9d896e951bb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3520, "license_type": "no_license", "max_line_length": 89, "num_lines": 121, "path": "/auth/auth.py", "repo_name": "adinamitru/Bookflix", "src_encoding": "UTF-8", "text": "import json\nimport sys\n\nimport requests\nimport mysql.connector\nfrom mysql.connector import Error, cursor\nfrom flask import Flask, escape, request\nfrom werkzeug.utils import redirect\n\napp = Flask(__name__)\n\n# url_createAcc = 'http://server:5000/createAcc'\n\n\n# url_login = 'http://server:5000/login'\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n \"\"\" Connect to MySQL database \"\"\"\n conn = None\n try:\n conn = mysql.connector.connect(host='db',\n port='3306',\n database='bookflix',\n user='root',\n password='root')\n\n user_name = request.values.get('user_name')\n password = request.values.get('password')\n sql_list_query = \"Select * from user_info WHERE user_name = %s AND password = %s\"\n cursor = conn.cursor()\n cursor.execute(sql_list_query, (user_name, password,))\n record = cursor.fetchone()\n print(record)\n conn.commit()\n\n except Error as e:\n print(e)\n\n finally:\n if conn is not None and conn.is_connected():\n conn.close()\n return json.dumps(record)\n\n\[email protected]('/createAcc', methods=['POST'])\ndef create_acc():\n \"\"\" Connect to MySQL database \"\"\"\n conn = None\n try:\n conn = mysql.connector.connect(host='db',\n port='3306',\n database='bookflix',\n user='root',\n password='root')\n print(\"DA\")\n\n name = request.values.get('name')\n user_name = request.values.get('user_name')\n password = request.values.get('password')\n\n mySql_insert_query = \"\"\"INSERT INTO user_info (name, user_name, password)\n VALUES (%s, %s, %s) \"\"\"\n recordTuple = (name, user_name, password)\n cursor = conn.cursor()\n cursor.execute(mySql_insert_query, recordTuple)\n conn.commit()\n\n except Error as e:\n print(e)\n\n finally:\n if conn is not None and conn.is_connected():\n conn.close()\n # return json.dumps(recordTuple)\n return \"\"\n\n\n# def create_account(name, user_name, password):\n# response = requests.post(\n# url_createAcc,\n# data={'name': name,\n# 'user_name': user_name,\n# 'password': password})\n\n\n# def list_users():\n# response = requests.get(\n# url_createAcc)\n# return response.text\n\n\n# def login(user_name, password):\n# response = requests.get(url_createAcc, )\n# return response.text\n\n\n# def operation_type():\n# while True:\n# print(\"Choose 
operation ('Create account' or 'Log in'): \")\n# line = sys.stdin.readline()\n# if line == \"Create account\\n\":\n# recordTuple = read_add_details()\n# create_account(recordTuple[0], recordTuple[1], recordTuple[2])\n# print(\"User added successfully!\")\n# if line == \"Login\\n\":\n# user_name = input(\"user_name: \")\n# password = input(\"password: \")\n# login(user_name, password)\n# # print(\"Book deleted successfully!\")\n# # if line == \"List users\\n\":\n# # list = list_users()\n# # print(list)\n\n\n# if __name__ == '__main__':\n# operation_type()\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=6000)\n" }, { "alpha_fraction": 0.5133062601089478, "alphanum_fraction": 0.5218763947486877, "avg_line_length": 33.372093200683594, "blob_id": "ecad3845164a08bff49b6890a46d0056c3c8f453", "content_id": "376e7b286e17a0d872621d5dd19f023d7156c8c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4434, "license_type": "no_license", "max_line_length": 120, "num_lines": 129, "path": "/admin/admin.py", "repo_name": "adinamitru/Bookflix", "src_encoding": "UTF-8", "text": "# Mitru Adina\n# 343C1\nimport json\nimport sys\n\nimport requests\n\nurl = 'http://server:5000/book'\nurl_login = 'http://server:5000/login'\nurl_createAcc = 'http://server:5000/createAcc'\n\n\ndef delete_book(book_id):\n response = requests.delete(\n url,\n data={'book_id': book_id})\n\n\ndef list_books():\n response = requests.get(\n url)\n return response.text\n\n\ndef add_book(title, author_name, publisher, language, genre, short_description, publishing_year,\n no_pages, no_readers, rate, awards):\n response = requests.post(url,\n data={'title': title,\n 'author_name': author_name,\n 'publisher': publisher,\n 'language': language,\n 'genre': genre,\n 'short_description': short_description,\n 'publishing_year': publishing_year,\n 'no_pages': no_pages,\n 'no_readers': no_readers,\n 'rate': rate,\n 'awards': awards})\n\n\ndef read_add_details():\n book_id = int(input(\"Add book_id: \"))\n title = input(\"Add title: \")\n author_name = input(\"Add author name: \")\n publisher = (input(\"Add publisher: \"))\n language = (input(\"Add language: \"))\n genre = (input(\"Add the genre of the book: \"))\n short_description = (input(\"Add a short description: \"))\n publishing_year = int(input(\"Add the publishing year: \"))\n no_pages = int(input(\"Add the number of pages: \"))\n no_readers = int(input(\"Add the number of readers: \"))\n rate = float(input(\"Add the rate: \"))\n awards = int(input(\"Add the awards number: \"))\n recordTuple = (title, author_name, publisher, language, genre, short_description, publishing_year,\n no_pages, no_readers, rate, awards, book_id)\n\n return recordTuple\n\n\ndef login(user_name, password):\n response = requests.post(\n url_login,\n data={\n 'user_name': user_name,\n 'password': password})\n return response.text\n\n\ndef create_account(name, user_name, password):\n response = requests.post(\n url_createAcc,\n data={'name': name,\n 'user_name': user_name,\n 'password': password})\n\n\ndef read_create_account_details():\n name = input(\"Name: \")\n user_name = input(\"User_name: \")\n password = (input(\"Password: \"))\n recordTuple = (name, user_name, password)\n\n return recordTuple\n\n\ndef operation_type():\n while True:\n print(\"Choose operation ('Create account' or 'Login'): \")\n line = sys.stdin.readline()\n ok = 5\n if line == \"Create account\\n\":\n recordTuple = read_create_account_details()\n 
create_account(recordTuple[0], recordTuple[1], recordTuple[2])\n print(\"User added successfully!\\n\" + \"Do you want you login?\")\n yes_no = input(\"Y/N : \")\n if yes_no == \"Y\":\n ok = 0\n if line == \"Login\\n\":\n user_name = input(\"user_name: \")\n password = input(\"password: \")\n record = json.loads(login(user_name, password))\n print(record)\n if record != \"\":\n ok = 0\n else:\n ok = 1\n if ok == 0:\n print(\"Choose operation ('Add book' or 'Delete book'): \")\n line = sys.stdin.readline()\n if line == \"Add book\\n\":\n recordTuple = read_add_details()\n add_book(recordTuple[0], recordTuple[1], recordTuple[2], recordTuple[3], recordTuple[4], recordTuple[5],\n recordTuple[6], recordTuple[7], recordTuple[8], recordTuple[9], recordTuple[10])\n print(\"Book added successfully!\")\n if line == \"Delete book\\n\":\n book_id = input(\"Insert the book id for cancellation: \")\n delete_book(book_id)\n print(\"Book deleted successfully!\")\n if line == \"List books\\n\":\n list = list_books()\n print(list)\n elif ok == 1:\n print(\"Wrong password or username, please try again \")\n elif ok == 2:\n print(\"You do not have account, please create account \")\n\n\nif __name__ == '__main__':\n operation_type()\n" }, { "alpha_fraction": 0.8399999737739563, "alphanum_fraction": 0.8399999737739563, "avg_line_length": 24, "blob_id": "de002203a0da267d5e9eb94944815fea0f7e39aa", "content_id": "9fb4b9458d8c0001a9c42e4f44ffe5f07913d12c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 25, "license_type": "no_license", "max_line_length": 24, "num_lines": 1, "path": "/run-admin.sh", "repo_name": "adinamitru/Bookflix", "src_encoding": "UTF-8", "text": "docker-compose run admin\n" } ]
14
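Every Flask view in the Bookflix server above repeats the same connect/commit/close boilerplate against the MySQL container. A minimal sketch of that pattern factored into a context manager, assuming the hard-coded settings used throughout server.py (host 'db', database 'bookflix', user/password 'root'); the `db_cursor` helper name is illustrative, not part of the repo:

```python
# Hypothetical refactor of the connect/commit/close boilerplate repeated in
# every Bookflix view; db_cursor is an illustrative name, not from the repo.
from contextlib import contextmanager

import mysql.connector
from mysql.connector import Error


@contextmanager
def db_cursor():
    """Yield a cursor, commit on success, and always close the connection."""
    conn = mysql.connector.connect(host='db', port='3306',
                                   database='bookflix',
                                   user='root', password='root')
    try:
        yield conn.cursor()
        conn.commit()
    except Error as e:
        # Mirrors the repo's behavior of printing and swallowing DB errors.
        print(e)
    finally:
        if conn.is_connected():
            conn.close()


# Usage inside a view, e.g. list_books:
#     with db_cursor() as cursor:
#         cursor.execute("SELECT * FROM book")
#         record = cursor.fetchall()
```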
Py-Lambdas/office-hours-django
https://github.com/Py-Lambdas/office-hours-django
927d3bde9cb94bc2d2d1e683acb5565a125f70db
40caa4e42a8bb9e4ba56981503cd0a279da17ed7
0fe9a23d77c04625fa8c363bf4044aea361b8408
refs/heads/main
2023-01-06T02:27:25.375527
2020-11-03T04:41:47
2020-11-03T04:41:47
309,574,909
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.5459940433502197, "alphanum_fraction": 0.5615726709365845, "avg_line_length": 36.44444274902344, "blob_id": "e6138a277f2647e76d191e3a6dbb35fb2e300322", "content_id": "168d6034c83949c017a52a854ed9d536e88b8793", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1348, "license_type": "no_license", "max_line_length": 147, "num_lines": 36, "path": "/tasks/migrations/0001_initial.py", "repo_name": "Py-Lambdas/office-hours-django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.3 on 2020-11-03 04:19\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Project',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('name', models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name='Task',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=255)),\n ('description', models.TextField()),\n ('complete', models.BooleanField(default=False)),\n ('priority', models.CharField(choices=[('U', 'Urgent'), ('H', 'High'), ('M', 'Medium'), ('L', 'Low')], default='L', max_length=1)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('project', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tasks', to='tasks.project')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.8130081295967102, "alphanum_fraction": 0.8130081295967102, "avg_line_length": 19.66666603088379, "blob_id": "d053110b3a3f5bdf94d9a9e6fe8633334739ed62", "content_id": "6865e168cf335694b5be78654c3ce219926e3eb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 123, "license_type": "no_license", "max_line_length": 33, "num_lines": 6, "path": "/tasks/admin.py", "repo_name": "Py-Lambdas/office-hours-django", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom .models import Task, Project\n\nadmin.site.register(Task)\nadmin.site.register(Project)" }, { "alpha_fraction": 0.6479925513267517, "alphanum_fraction": 0.6535947918891907, "avg_line_length": 29.600000381469727, "blob_id": "42207f7a84cf380194194f863b979824ff7a5e95", "content_id": "ebcdf4495e2744f4cfb356e794eb18795d71c121", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1071, "license_type": "no_license", "max_line_length": 79, "num_lines": 35, "path": "/tasks/models.py", "repo_name": "Py-Lambdas/office-hours-django", "src_encoding": "UTF-8", "text": "import uuid\n\nfrom django.db import models\n\n\nclass Task(models.Model):\n class TaskPriority(models.TextChoices):\n URGENT = \"U\", \"Urgent\"\n HIGH = \"H\", \"High\"\n MEDIUM = \"M\", \"Medium\"\n LOW = \"L\", \"Low\"\n\n id = models.UUIDField(primary_key=True, editable=False, default=uuid.uuid4)\n name = models.CharField(max_length=255)\n description = models.TextField()\n complete = models.BooleanField(default=False)\n priority = models.CharField(\n max_length=1, choices=TaskPriority.choices, default=TaskPriority.LOW\n )\n project = models.ForeignKey(\n \"Project\", on_delete=models.CASCADE, related_name=\"tasks\", null=True\n )\n created_at = models.DateTimeField(auto_now_add=True)\n 
updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return f\"{self.name} <{self.priority}>\"\n\n\nclass Project(models.Model):\n id = models.UUIDField(primary_key=True, editable=False, default=uuid.uuid4)\n name = models.TextField()\n\n def __str__(self):\n return f\"{self.name} <Tasks: {self.tasks}>\"\n" } ]
3
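The office-hours-django models above combine UUID primary keys, a TextChoices priority field, and a nullable ForeignKey whose related_name="tasks" exposes a reverse manager. A short sketch of how they would be exercised from a Django shell, assuming the app is installed as `tasks` and the migration above has been applied:

```python
# Illustrative Django shell session against the Task/Project models above;
# assumes the app label is "tasks" and migrations have been run.
from tasks.models import Project, Task

project = Project.objects.create(name="Office Hours")
task = Task.objects.create(
    name="Prepare demo",
    description="Walk through the Task model",
    priority=Task.TaskPriority.URGENT,  # stored as "U" in the CharField
    project=project,
)

# related_name="tasks" on the ForeignKey gives the reverse manager:
urgent = project.tasks.filter(priority=Task.TaskPriority.URGENT)

# Django generates get_<field>_display() for fields with choices:
print(task.get_priority_display())  # -> "Urgent"
```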
t03r1cht/rub-crypto-examples
https://github.com/t03r1cht/rub-crypto-examples
41df494367b5681d87c17f8e6a703060c80c6ea9
ac1700fd1542f9b5e80ebbb3cdb599626ced64eb
44c81c95d26338805f44bd5fd9793c3cb0a2b157
refs/heads/main
2023-02-18T21:09:17.445238
2021-01-24T12:04:44
2021-01-24T12:04:44
332,421,615
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.48281723260879517, "alphanum_fraction": 0.5260574221611023, "avg_line_length": 25.479999542236328, "blob_id": "3c410334e470a57aee4b2c422d7097da48c77096", "content_id": "a20959b42bf372cf3611db357255b32db82a423c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5296, "license_type": "no_license", "max_line_length": 120, "num_lines": 200, "path": "/rsa-attacks.py", "repo_name": "t03r1cht/rub-crypto-examples", "src_encoding": "UTF-8", "text": "from math import gcd\nfrom math import sqrt\nfrom math import floor\nimport primefac\nfrom fractions import Fraction\n\ndef factorize_n(n, e, d, a=2):\n \"\"\"\n Factor both primes p, q from n using the naive approach.\n \n n: the modulus to factor into p, q (p, q prime)\n e: the public RSA key\n d: the private RSA key\n a: a random number from the natural numbers excluding 0 (Z_n*)\n\n See RUB script page 88\n \"\"\"\n \n # check if a coprime n (gcd(a,n) = 1)\n if not gcd(a,n) == 1:\n print(\"a, n not coprime\")\n return\n else:\n print(\"a, n coprime\")\n\n # factor e*d-1 into 2^s * u\n factors=get_prime_factors(e*d-1)\n\n # check if 2 is a prime factor, if not we can't continue with attack\n if not 2 in factors:\n print(\"2 not a prime factor of e*d-1\")\n return\n\n s=factors[2]\n # pop key 2, for easier computation of u\n factors.pop(2)\n # calculate u (multiplication of all prime factors except 2)\n u = 1\n for key in factors:\n u *= key**factors[key]\n \n print(\"s ==>\", s)\n print(\"u ==>\", u)\n\n # compute a^u mod n\n a_u=a**u%n\n \n # compute j_i for each j in (0, ..., s-1): j_i = gcd((a^u)^2^j - 1,n) = p IFF p NEQ 1, p NEQ n\n p=0\n for i in range(s):\n j_i=gcd(a_u**2**i-1,n)\n if not j_i==1 and not j_i==n:\n p=j_i\n break\n \n if p==0:\n print(\"could not find p in iterations, attack failed\")\n return\n\n # compute q using n and p\n q=n//p\n if not p*q==n:\n print(\"n was not correctly factored\")\n return\n\n print()\n print(\"p ==>\", p)\n print(\"q ==>\", q)\n\ndef rsa_attack_small_e(p_1, p_2, p_3):\n \"\"\"\n Takes 3 tuples with (n_i, e_i, c_i) (attack on 3 parties)\n c_i must be equal for all 3 tuples\n \"\"\"\n # unwind tuples \n n_1=p_1[0]\n e_1=p_1[1]\n c_1=p_1[2]\n\n n_2=p_2[0]\n e_2=p_2[1]\n c_2=p_2[2]\n\n n_3=p_3[0]\n e_3=p_3[1]\n c_3=p_3[2]\n\n # check if the public key is the same for all 3 parties\n if not e_1==e_2==e_3:\n print(\"public key e not equal for all 3 parties, cannot perform attack\")\n return\n \n # use the CRT (by hand) to solve the system of congruences computing c (ciphertext that satisfies all 3 congruences)\n # c = c_1 mod n_1\n # c = c_2 mod n_2\n # c = c_3 mod n_3\n \n # modulus to reduce final c\n N=n_1*n_2*n_3\n\n # find x_1, x_2, x_3 so that:\n # x_1: n_2 * n_3 * x_1 = 1 mod n_1 --> c_1 * n_2 * n_3 * x_1 = c_1 * 1 = c_1 mod n_1\n # x_2: n_3 * n_1 * x_2 = 1 mod n_2 --> c_2 * n_3 * n_1 * x_2 = c_2 * 1 = c_2 mod n_2\n # x_3: n_1 * n_2 * x_3 = 1 mod n_3 --> c_3 * n_1 * n_2 * x_3 = c_3 * 1 = c_3 mod n_3\n #\n # in other words: x_1 is the modular multiplicative inverse from n_2 * n_3 (mod n_1)\n\n x_1 = pow(n_2*n_3, -1, n_1)\n x_2 = pow(n_3*n_1, -1, n_2)\n x_3 = pow(n_1*n_2, -1, n_3)\n print()\n print(\"x_1 ==>\", x_1)\n print(\"x_2 ==>\", x_2)\n print(\"x_3 ==>\", x_3)\n \n # assemble the result of the CRT\n c = c_1 * n_2 * n_3 * x_1\n c += c_2 * n_3 * n_1 * x_2\n c += c_3 * n_1 * n_2 * x_3\n # reduce mod N\n c=c%N\n print(\"c ==>\", c)\n\n # since c = m^e (mod n_1*n_2*n_3), compute the e-th root of c\n # all public keys (e) 
are equal\n e=e_1\n print(\"m ==>\", round(c**(1/float(e))))\n\ndef rsa_attack_small_d(n,e):\n \"\"\"\n Requires a modulus n=p*q, p, q prime and the public RSA key e\n\n Relies heavily on the fact that edg=k*floor((edg/k))+g and p+q=-floor(edg/k)+n+1\n \"\"\"\n pass\n\ndef get_prime_factors(x):\n \"\"\"\n Factorizes a give number into a list of prime factors\n \"\"\"\n return primefac.factorint(x)\n \n\ndef number_to_cf(n,d=1):\n \"\"\"\n Requires a numerator and denominator: n/d\n Calculate the continued fraction of the given number (ger.: kettenbruchentwicklung)\n\n n/d = <q_0, ... , q_m>\n n/1 = <n>\n \"\"\"\n # initialize the lists that hold the q_i, r_i values we need to compute the continued fractions\n q_i=[]\n r_i=[]\n\n if d==0:\n print(\"cant divide by zero\")\n return\n\n # if the give fraction is a natural number (denominator is 1)\n if d==1:\n q_i.append(n)\n return q_i\n\n # init q_0, r_0\n q_i.append(int(floor(Fraction(n,d))))\n # use double index to access the un-floored q_0 (0: un-floored, 1: floored)\n r_i.append(Fraction(n,d)-q_i[0])\n \n # TODO: exactly when does it stop?\n # runs until q_i=i\n #\n # in other words: continue, until q_i is an integer ==> float(Fraction(n,d)).is_integer()\n # we calculate the next iteration generally as:\n # q_i = floor(1/(r_(i-1)))\n # r_i = 1/(r_(i-1)) - q_i\n i = 1\n while True:\n # index 0 is the un-floored previous q_i\n q = floor(Fraction(1, r_i[i-1]))\n q_i.append(int(q))\n\n # perform exit condition check, if it fails, continue with computation of r\n if i == q:\n break\n r = Fraction(1, r_i[i-1]) - Fraction(q, 1)\n r_i.append(r)\n\n i+=1\n \n return q_i\n\nif __name__ == '__main__':\n #factorize_n(n=667,e=3, d=411)\n # rsa_attack_small_e(p_1=(289,3,120),\n # p_2=(529,3,413),\n # p_3=(319,3,213))\n print(number_to_cf(4,11))\n print(number_to_cf(n=5))\n print(number_to_cf(1234,57))\n" } ]
1
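The `rsa_attack_small_e` function above finishes by taking the e-th root of the CRT result with `round(c**(1/float(e)))`, which silently loses precision once c no longer fits in a double. A minimal sketch of an exact integer root by binary search that could stand in for that final step; `integer_root` is an illustrative name, not part of the repo:

```python
# Exact integer e-th root, a sketch to replace the float-based
# round(c ** (1 / float(e))) above, which breaks for large ciphertexts.
def integer_root(c, e):
    """Return floor(c ** (1/e)) using only integer arithmetic."""
    # Upper bound: the root of a b-bit number has at most ceil(b/e) bits.
    lo, hi = 0, 1 << ((c.bit_length() + e - 1) // e + 1)
    while lo < hi:
        mid = (lo + hi + 1) // 2
        if mid ** e <= c:
            lo = mid
        else:
            hi = mid - 1
    return lo


assert integer_root(27, 3) == 3
assert integer_root(10 ** 60 + 1, 3) == 10 ** 20  # exact where floats fail
```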
bosley-splunk/jarvis
https://github.com/bosley-splunk/jarvis
2d498656364fa42b2774550af1b41f65f82a5a22
a80514e2d0ca872bbfdd72bb1a3df3ec1167f7f9
ab2acc56b6047fa26be45ee54ab8db276ebb8199
refs/heads/master
2020-03-27T09:54:33.961581
2018-08-28T02:40:05
2018-08-28T02:40:05
146,382,052
0
0
null
2018-08-28T02:40:36
2018-08-28T02:40:16
2018-08-28T02:40:14
null
[ { "alpha_fraction": 0.622650146484375, "alphanum_fraction": 0.6240257024765015, "avg_line_length": 26.78343963623047, "blob_id": "dad1ea54cd952b01dc2d5702ece52f936748f0e3", "content_id": "45c3fe9ccd7549f940324953f517930d102f1a47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4362, "license_type": "no_license", "max_line_length": 114, "num_lines": 157, "path": "/setup.py", "repo_name": "bosley-splunk/jarvis", "src_encoding": "UTF-8", "text": "import sqlite3 as lite\nfrom sqlite3 import Error\nimport os\nfrom configparser import ConfigParser\nimport logging\nimport shutil\nimport sys\n\n\"\"\"\nJarvis Install Script\nUsage:\n ./setup.py \n \nMake sure you modify the jarvis.cfg file first\n\"\"\"\n\n\n# Functions go below\ndef db_connect(path_to_db):\n \"\"\"\n Attempts to connect to the DB. Pulls configuration information from the config file\n db_name = name of the individual DB file\n db_location = path name to the DB file\n db_path = combination of db_location and name\n :return:\n Returns connection object\n \"\"\"\n\n logging.debug(\"db_path set to: %s\", path_to_db)\n try:\n conn = lite.connect(path_to_db)\n sqliteversion = lite.version\n\n logging.debug(\"SQLite version: %s\", sqliteversion\n )\n\n except Error as e:\n logging.debug(\"Issue connecting to DB:\")\n logging.debug(e)\n sys.exit(1)\n\n return conn\n\n\ndef setup(dblocation, dbpath):\n \"\"\"\n Sets up environment\n db_name = name of the individual DB file\n db_location = path name to the DB file = usually ./db\n db_path = full path name\n\n If the DB exists, it will be backed up and recreated\n\n :return:\n Returns success status\n \"\"\"\n\n # Checking if the DB Directory exists, if not create it\n if not os.path.isdir(dblocation):\n logging.info(\"DB directory not found, creating\")\n\n\n try:\n os.makedirs(dblocation)\n\n except PermissionError:\n logging.critical(\"Unable to create DB Directory at %s. 
Exiting.\", dblocation)\n sys.exit(1)\n\n except Error as e:\n logging.critical(\"Error encountered while interacting with filesystem\")\n logging.critical(e)\n sys.exit(1)\n\n else:\n logging.info(\"Created DB directory successfully\")\n\n # Check for existence of the db file\n # If it does, back it up\n\n if os.path.isfile(dbpath):\n backup_path = dbpath + \".bak\"\n logging.info(\"DB file found, moving to %s.bak and creating new db\", dbpath)\n\n shutil.move(dbpath, backup_path)\n\n logging.info(\"Made backup, creating new database\")\n\n db = db_connect(dbpath)\n\n logging.info(\"Connected to the DB successfully, building tables now\")\n\n create_tables(db)\n\n\ndef create_tables(db):\n command = db.cursor()\n\n # Build ticket_queue table\n logging.debug(\"Running table create for ticket_queue\")\n command.execute(\"\"\"CREATE TABLE `ticket_queue` (\n `record_number`\tINTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\n `case_number`\tTEXT NOT NULL,\n `creation_timestamp`\tINTEGER NOT NULL,\n `req_uname`\tTEXT NOT NULL,\n `req_uid`\tTEXT NOT NULL,\n `assignee_uname`\tTEXT,\n `assignee_uid`\tTEXT,\n `assignedby_uid`\tTEXT,\n `assignedby_uname`\tTEXT,\n `assigned_timestamp`\tINTEGER,\n `current_status`\tTEXT,\n `closedby_uname`\tTEXT,\n `closedby_uid`\tTEXT,\n `closed_timestamp`\tINTEGER,\n `priority`\tTEXT DEFAULT 'P3',\n `escalated`\tTEXT DEFAULT 'N',\n `escalatedby_uname`\tTEXT,\n `escalatedby_uid`\tTEXT,\n `escalation_date`\tINTEGER\n );\"\"\")\n\n logging.debug(\"Running table create for tech_list\")\n command.execute(\"\"\"CREATE TABLE `tech_list` (\n `record_number`\tINTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\n `tech_uid`\tTEXT NOT NULL,\n `tech_name`\tTEXT NOT NULL,\n `manager_name`\tTEXT NOT NULL,\n `manger_uid`\tTEXT NOT NULL\n );\"\"\")\n\n\n# Main execution section below\nif __name__ == '__main__':\n\n # Turn on Logging - cause lord knows I need it\n logging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s - %(name)s - %(levelname)s - [%(process)d] - (%(funcName)s:%(lineno)s) : %(message)s',\n filename='jarvis_setup.log',\n filemode='w'\n )\n\n logging.info(\"Logging initialized. 
Reading in Configs.\")\n\n # Read in config file - jarvis.cfg\n app_config = ConfigParser()\n app_config.read('jarvis.cfg')\n\n # Setup DB stuff\n logging.debug(\"Reading in various settings\")\n db_name = app_config.get('DEFAULT', 'database_name')\n db_location = app_config.get('DEFAULT', 'database_location')\n db_path = os.path.join(db_location, db_name)\n\n logging.debug(\"starting setup\")\n setup(db_location, db_path)\n" }, { "alpha_fraction": 0.5349934101104736, "alphanum_fraction": 0.5386719703674316, "avg_line_length": 33.09324645996094, "blob_id": "f11f7d131da34eb7088e02aa4bd3caa4b6d61e52", "content_id": "685ddc0e4153031e69fcaefb613504e956abef89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10602, "license_type": "no_license", "max_line_length": 117, "num_lines": 311, "path": "/jarvis.py", "repo_name": "bosley-splunk/jarvis", "src_encoding": "UTF-8", "text": "from slackclient import SlackClient\nimport re\nimport sqlite3 as lite\nfrom sqlite3 import Error\nfrom pytz import timezone\nfrom datetime import datetime\nimport os\nfrom configparser import ConfigParser\nimport logging\nfrom logging.config import fileConfig\nfrom flask import Flask, abort, jsonify, request\nimport hmac\nimport hashlib\nfrom random import randint\nimport json\nfrom logging.handlers import TimedRotatingFileHandler\n\napp = Flask(__name__)\n\n\n# Define functions\ndef logging_setup(log_directory):\n \"\"\"\n Sets up rotating log under log_directory\n :param log_directory:\n :return:\n \"\"\"\n log_file = \"jarvis.log\"\n log_path = os.path.join(log_directory, log_file)\n\n if not os.path.isdir(log_directory):\n os.mkdir(log_directory)\n\n formatter = logging.Formatter('%(name)s - %(levelname)s - [%(process)d] (%(funcName)s:%(lineno)s) : %(message)s')\n logging_level = logging.DEBUG\n handler = logging.handlers.TimedRotatingFileHandler(log_path,\n when='midnight',\n backupCount=5)\n handler.setFormatter(formatter)\n logger = logging.getLogger()\n logger.addHandler(handler)\n logger.setLevel(logging_level)\n\n return logger\n\n\ndef validate_request(request):\n \"\"\"\n Validates the request is officially from slack. See https://api.slack.com/docs/verifying-requests-from-slack\n for more information around this.\n :param request:\n :return:\n \"\"\"\n # Get the our signing secret from the config\n internal_slack_signing_secret = app_config.get('Slack_Settings', 'slack_signing_secret')\n encoded_internal_signing = internal_slack_signing_secret.encode()\n\n # Get what Slack sent us\n sent_slack_signature = request.headers.get('X-Slack-Signature')\n request_timestamp = request.headers.get('X-Slack-Request-Timestamp')\n\n # Get the body of the request. This was seriously a pain.\n request_body = request.get_data()\n request_body = request_body.decode('utf-8')\n version = \"v0\"\n separator = \":\"\n\n # Build the signature line\n request_signature_line = version + separator + request_timestamp + separator + request_body\n encoded_signature_line = request_signature_line.encode()\n\n # Now to hash it\n hashed_signature = hmac.new(encoded_internal_signing, encoded_signature_line, hashlib.sha256)\n hexhashedsignature = \"v0=\" + hashed_signature.hexdigest()\n\n # This took me all day, but it works!\n if hexhashedsignature != sent_slack_signature:\n logging.critical(\"Message not validated! 
Something is wrong!\")\n validation_error = {'text': 'Your message was\\'t accepted due to invalid signing'}\n return jsonify(validation_error)\n\n else:\n logging.info(\"Message validated. Have a great day\")\n\n\ndef lookup_username(user_id):\n \"\"\"\n Takes the userid and returns the full user name.\n It accomplishes this by connecting to the slack api using users.info\n and getting the real_name_normalized from the results\n This is per Slacks Warning that name will be going away\n sc = slackclient connection\n user_id = user id to look up\n :param sc, user_id:\n :return user_full_name:\n \"\"\"\n\n logging.info(\"Looking up user name from Slack API\")\n profile = sc.api_call(\"users.info\", timeout=None, user=user_id)\n user_full_name = profile['user']['profile']['real_name_normalized']\n\n return user_full_name\n\ndef generate_timestamp():\n \"\"\"\n Generates timestamp for insertion into the DB in epoch format\n Timezone is set to pacific time for standardization\n :return:\n \"\"\"\n\n pacific_time = timezone('America/Los_Angeles')\n current_time = datetime.now(pacific_time)\n timestamp = current_time.timestamp()\n\n return timestamp\n\n\ndef connect_to_db():\n \"\"\"\n Attempts to connect to sqlite db\n db_path = full path db\n :param:\n :return db object:\n \"\"\"\n\n # Check to ensure db directory exists - building full path\n db_dir = os.path.join(app_config.get('DEFAULT', 'source_path'), app_config.get('DEFAULT', 'database_location'))\n\n logging.info(\"Checking to see if db path exists\")\n if not os.path.isdir(db_dir):\n logging.critical(\"Database doesn't exist, please run setup.py\")\n return(\"\", 500)\n\n else:\n db_path = os.path.join(db_dir, app_config.get('DEFAULT', 'database_name'))\n logging.info(\"Connecting to DB at %s\", db_path)\n\n try:\n db = lite.connect(db_path)\n\n except Error as e:\n logging.critical(\"Database connection error: \")\n logging.critical(e)\n return(\"\", 500)\n\n return db\n\n\ndef message_pager(message):\n \"\"\"\n Takes the message, inserts it into the DB and notifies Cloud Support Channel\n Lets the requester know it's been handled\n :param message:\n :return:\n \"\"\"\n\n # Extract the required information from the payload\n submitter_uid = message[\"user\"][\"id\"]\n submitter_name = lookup_username(submitter_uid)\n case_number = message[\"submission\"][\"case_number\"]\n case_priority = message[\"submission\"][\"priority\"]\n case_description = message[\"submission\"][\"description\"]\n channel = message[\"channel\"][\"id\"]\n\n logging.info(\"Sending update to requester\")\n message_response = sc.api_call(\"chat.postEphemeral\", timeout=None,\n channel=channel,\n text=\"Working on request\",\n user=submitter_uid)\n\n logging.info(\"Results of sending message: %s\", message_response['ok'])\n\n db = connect_to_db()\n\n timestamp = generate_timestamp()\n\n c = db.cursor()\n\n c.execute(''' INSERT INTO TICKET_QUEUE(case_number, creation_timestamp, req_uname, req_uid, priority) \n VALUES(?,?,?,?,?)''', (case_number, timestamp, submitter_name, submitter_uid, case_priority))\n\n\n\n\n\n\n# Routing definitions go here\n# Message Receiver end point for custom dialogs\[email protected]('/message_receiver', methods=['Post'])\ndef message_receiver():\n \"\"\"\n Message Endpoint from Slack\n Validates the incoming message\n Pulls the callback_id to determine what app to route to\n Hands off to the specific def for that app to handle\n\n :return:\n \"\"\"\n validate_request(request)\n\n logging.info(\"Received Message from Slack\")\n\n message = 
json.loads(request.form['payload'])\n request_type = message[\"callback_id\"]\n\n if request_type.startswith('pagerapp'):\n logging.info(\"Received request for the pager app\")\n message_pager(message)\n\n return ('', 200)\n\n\[email protected]('/heartbeat', methods=['POST'])\ndef heartbeat():\n logging.info(\"Heartbeat requested\")\n validate_request(request)\n heartbeat_message = {'text': 'I\\'m Alive'}\n return jsonify(heartbeat_message)\n\n\[email protected]('/page_cs', methods=['POST'])\ndef page_cs():\n \"\"\"\n Processes /page_cs command -\n end goal is to create a custom dialog requesting ticket number and priority\n :return:\n \"\"\"\n validate_request(request)\n\n # Generate random callback_id\n callback_number = randint(10000, 99999)\n callback_id = \"pagerapp-\" + str(callback_number)\n\n # Generate the PopUp\n logging.info(\"Page Request Received - popping dialog\")\n page_dialog = sc.api_call(\"dialog.open\", timeout=None, trigger_id=request.form['trigger_id'],\n dialog={\n \"callback_id\": callback_id,\n \"title\": \"Notify Cloud Support\",\n \"submit_label\": \"Submit\",\n \"notify_on_cancel\": False,\n \"elements\": [\n {\n \"type\": \"text\",\n \"label\": \"Case Number\",\n \"name\": \"case_number\"\n },\n {\n \"type\": \"select\",\n \"label\": \"Priority\",\n \"name\": \"priority\",\n \"options\": [\n {\n \"label\": \"P1\",\n \"value\": \"P1\"\n },\n {\n \"label\": \"P2\",\n \"value\": \"P2\"\n },\n {\n \"label\": \"P3\",\n \"value\": \"P3\"\n },\n {\n \"label\": \"P4\",\n \"value\": \"P4\"\n }\n ]\n },\n {\n \"type\": \"textarea\",\n \"label\": \"Description of issue\",\n \"name\": \"description\",\n \"hint\": \"Be descriptive as possible\"\n },\n ]\n }\n )\n return('', 200)\n\n\n# Main execution section below\nif __name__ == '__main__':\n \"\"\"\n Moving to Flask stand alone vs under Apache\n \"\"\"\n\n # Static configs go here\n APP_CONFIG_FILE = \"jarvis.cfg\"\n\n # Reading in configs\n app_config = ConfigParser()\n app_config.read(APP_CONFIG_FILE)\n log_dir = app_config.get('DEFAULT', 'log_directory')\n\n # Set up logging\n logging = logging_setup(log_dir)\n logging.info(\"Logging initialized - Setting up slack client\")\n\n sc = SlackClient(app_config.get('Slack_Settings', 'bot_oauth_key'))\n\n logging.info(\"Starting Flask\")\n\n if app_config.get('DEFAULT', 'remote_environment') == True:\n cert = app_config.get('SSL', 'cert')\n key = app_config.get('SSL', 'key')\n app.run(ssl_context=(app_config.get('SSL', 'cert'), app_config.get('SSL', 'key')))\n\n else:\n app.run(debug=True)" } ]
2
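The `validate_request` helper in jarvis.py above builds Slack's v0 signature base string ("v0:" + timestamp + ":" + body) and HMAC-SHA256 digests it with the signing secret, but it compares digests with `!=` and its return value is ignored by the route handlers, so invalid requests are never actually rejected. A condensed sketch of the same check that returns a boolean and uses `hmac.compare_digest` for a constant-time comparison; the function name is illustrative, not from the repo:

```python
# Condensed v0 signing check from jarvis.py, as a sketch only: it returns a
# boolean so callers can abort(403), and uses hmac.compare_digest instead of
# the original != comparison.
import hashlib
import hmac


def slack_signature_valid(signing_secret, timestamp, body, sent_signature):
    base = f"v0:{timestamp}:{body}".encode()
    digest = hmac.new(signing_secret.encode(), base, hashlib.sha256).hexdigest()
    return hmac.compare_digest(f"v0={digest}", sent_signature)


# In a Flask view:
#     if not slack_signature_valid(secret,
#                                  request.headers["X-Slack-Request-Timestamp"],
#                                  request.get_data(as_text=True),
#                                  request.headers["X-Slack-Signature"]):
#         abort(403)
```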
jonathanspivack/amazon_pricing_alerts
https://github.com/jonathanspivack/amazon_pricing_alerts
6a995059b1c93aaa73ffd3516acef9ee5640dea6
df12d4238d0e70cca66c0bdac5df23dd5d6a1e0d
6d12251af1b341bb7147fc2a090814cbaddaab04
refs/heads/master
2020-03-13T14:11:08.370579
2018-04-26T14:23:49
2018-04-26T14:23:49
131,153,451
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5728484392166138, "alphanum_fraction": 0.5841427445411682, "avg_line_length": 35.449153900146484, "blob_id": "7da58770ad30c06a3f2cf57b3e7669171b483f76", "content_id": "1fc2ef24b6e984b69b488cfe2f204dcafa55e348", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4427, "license_type": "no_license", "max_line_length": 161, "num_lines": 118, "path": "/amazon/src/models/alerts/alert.py", "repo_name": "jonathanspivack/amazon_pricing_alerts", "src_encoding": "UTF-8", "text": "from database import Database\r\nimport requests\r\nfrom fake_useragent import UserAgent\r\nfrom bs4 import BeautifulSoup\r\nimport uuid\r\nimport datetime\r\n\r\nclass Alert(object):\r\n COLLECTION_NAME = 'alerts'\r\n ALERT_TIMEOUT = 10\r\n\r\n def __init__(self,user_email,price_limit,name,url,price,active,alert_id=None,last_checked=None):\r\n self.user_email = user_email\r\n self.price_limit = price_limit\r\n self.name = name\r\n self.url = url\r\n self.price = price\r\n self.active = active\r\n self.alert_id = uuid.uuid4().hex if alert_id is None else alert_id\r\n self.last_checked = datetime.datetime.utcnow() if last_checked is None else last_checked\r\n\r\n @staticmethod\r\n def load_price(url):\r\n ua = UserAgent()\r\n header = {'user-agent': ua.chrome}\r\n page = requests.get(url,headers=header)\r\n soup = BeautifulSoup(page.text, 'lxml')\r\n price = soup.find_all('span', attrs={\"id\": \"priceblock_ourprice\"})\r\n money = price[0].string\r\n try:\r\n return float(money[1:])\r\n except:\r\n clean_money = money.replace(\",\",\"\")\r\n return float(clean_money[1:])\r\n\r\n\r\n def save_to_mongo(self):\r\n #Database.insert(Alert.COLLECTION_NAME, self.json())\r\n Database.update(Alert.COLLECTION_NAME, {\"alert_id\": self.alert_id}, self.json())\r\n\r\n def json(self):\r\n return{\r\n \"user_email\":self.user_email,\r\n \"price_limit\":self.price_limit,\r\n \"name\":self.name,\r\n \"url\":self.url,\r\n \"price\":self.price,\r\n \"active\":self.active,\r\n \"alert_id\":self.alert_id,\r\n \"last_checked\":self.last_checked\r\n }\r\n\r\n\r\n def create_alert(self):\r\n pass\r\n\r\n @classmethod\r\n def find_by_user_email(cls, user_email):\r\n alerts = Database.find(Alert.COLLECTION_NAME, {'user_email': user_email})\r\n alerts_list = []\r\n #items_list = []\r\n for alert in alerts:\r\n x = Alert(alert['user_email'],alert['price_limit'],alert['name'],alert['url'],alert['price'],alert['active'],alert['alert_id'],alert['last_checked'])\r\n alerts_list.append(x)\r\n #item = Item.get_by_id(alert['item_id'])\r\n #items_list.append(item)\r\n\r\n return alerts_list\r\n\r\n\r\n @staticmethod\r\n def find_by_id(alert_id):\r\n alert = Database.find_one(Alert.COLLECTION_NAME, {'alert_id': alert_id})\r\n print(alert)\r\n return Alert(alert['user_email'],alert['price_limit'],alert['name'],alert['url'],alert['price'],alert['active'],alert['alert_id'],alert['last_checked'])\r\n\r\n\r\n def deactivate(self):\r\n self.active = False\r\n self.save_to_mongo()\r\n\r\n def activate(self):\r\n self.active = True\r\n self.save_to_mongo()\r\n\r\n def load_item_price(self):\r\n self.load_price(self.url)\r\n self.last_checked = datetime.datetime.utcnow()\r\n self.save_to_mongo()\r\n\r\n def delete(self):\r\n Database.remove(Alert.COLLECTION_NAME, {'alert_id': self.alert_id})\r\n\r\n @staticmethod\r\n def find_needing_update(minutes_since_update=1):\r\n last_updated_limit = datetime.datetime.utcnow() - datetime.timedelta(minutes=minutes_since_update)\r\n needing_update = 
Database.find(Alert.COLLECTION_NAME,{\"last_checked\": {\"$lte\": last_updated_limit}, \"active\":True})\r\n alerts_needing_update = []\r\n for alert in needing_update:\r\n x = Alert(alert['user_email'],alert['price_limit'],alert['name'],alert['url'],alert['price'],alert['active'],alert['alert_id'],alert['last_checked'])\r\n alerts_needing_update.append(x)\r\n return alerts_needing_update\r\n\r\n def send_email_if_price_reached(self):\r\n if float(self.price) < float(self.price_limit):\r\n self.send()\r\n print('sending email to {}'.format(self.user_email))\r\n\r\n def send(self):\r\n print(\"calling mailgun api\")\r\n requests.post(\r\n \"https://api.mailgun.net/v3/sandbox22a2125bd425489988f3dac8f5457955.mailgun.org/messages\",\r\n auth=(\"api\", \"key-dc8342eaab860ea9ae9ee5767d15d7f2\"),\r\n\r\n data={\"from\": \"Mailgun Sandbox <[email protected]>\",\r\n \"to\": \"Jonathan Spivack <{}>\".format(\"[email protected]\"),\r\n \"subject\": \"Hello Jonathan Spivack\",\r\n \"text\": \"Congratulations Jonathan Spivack, you just sent an email with Mailgun! You are truly awesome!\"})\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.7323943376541138, "alphanum_fraction": 0.7323943376541138, "avg_line_length": 24, "blob_id": "83fae6080d430ecbfbc98717792d4335079b0133", "content_id": "7520e1ad4a2a5838b955455f915b6baf39631688", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 284, "license_type": "no_license", "max_line_length": 51, "num_lines": 11, "path": "/amazon/src/alert_updater.py", "repo_name": "jonathanspivack/amazon_pricing_alerts", "src_encoding": "UTF-8", "text": "from database import Database\r\nfrom models.alerts.alert import Alert\r\n\r\nDatabase.initialize()\r\n\r\nalerts_needing_update = Alert.find_needing_update()\r\n\r\nfor alert in alerts_needing_update:\r\n alert.load_item_price()\r\n alert.save_to_mongo()\r\n alert.send_email_if_price_reached()" } ]
2
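`Alert.load_price` above strips a leading currency symbol, and on failure retries after removing thousands separators, to turn the scraped `priceblock_ourprice` string into a float. The same cleanup isolated as a pure function so it can be unit-tested without fetching a page; `parse_price` is an illustrative name, not part of the repo:

```python
# The price-string cleanup from Alert.load_price, pulled out as a pure
# function for testing; parse_price is an illustrative name, not from the repo.
def parse_price(money: str) -> float:
    """Turn a scraped string like '$1,149.00' into 1149.0."""
    return float(money.replace(",", "").lstrip("$"))


assert parse_price("$19.99") == 19.99
assert parse_price("$1,149.00") == 1149.0
```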
oysx/dockerUtility
https://github.com/oysx/dockerUtility
c3f0f727e39b6e53ddfae926bbca321dd962bc20
965011165c00d8261c99f5083f66aa72b6c8ce36
b903b56f445e576bbffb3032ca0adcadb01faced
refs/heads/master
2023-01-28T21:23:14.647185
2023-01-08T04:44:02
2023-01-08T04:44:02
196,911,482
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6641566157341003, "alphanum_fraction": 0.6804718971252441, "avg_line_length": 20.29946517944336, "blob_id": "3f62cee2cff7a3602acc86aae9bf3591c1b070ae", "content_id": "ba1056687510b4d2b3734fca7798919c50f705bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3984, "license_type": "no_license", "max_line_length": 99, "num_lines": 187, "path": "/vidocker.sh", "repo_name": "oysx/dockerUtility", "src_encoding": "UTF-8", "text": "run(){\n\techo -n \"$*\"\n\tbash -c \"$*\"\n\techo \"==> $?\"\n}\n\n#check priviledge\nif [ \"$(id -u)\" != \"0\" ];then\n\techo \"must run as root user, please use sudo\"\n\texit 1\nfi\n\n#install packages\nwhich brctl >/dev/null\nif [ $? != 0 ];then\n\techo \"install packages required now\"\n\tapt-get install bridge-utils\n\tif [ $? != 0 ];then\n\t\techo \"failed to get package\"\n\t\texit 1\n\tfi\nfi\n\nIFINDEX=eth0\nIMAGE=$1\n#check mandantory parameters\nif [ \"x$IMAGE\" = \"x\" ];then\n\techo \"usage: $0 <docker image>\"\n\texit 1\nfi\n\n#check existance of the image\nif [ \"$(docker images -q $IMAGE)\" = \"\" ];then\n\techo \"can not find image \\\"$IMAGE\\\"\"\n\texit 1\nfi\n\n#docker running options\nDOCKER_OPTIONS=--privileged=true\nUSER=oysx\nPASSWD=oysx\nUID=1000\nGID=1000\n\n#remove jail for dhclient in docker instance\napparmor_status | grep dhclient\nif [ $? = 0 ];then\n\tapparmor_parser -R /etc/apparmor.d/sbin.dhclient \nfi\n\nstartContainer(){\n\tIMAGE=$1\n\t#CONTAINER=$(docker run -u $UID:$GID --group-add=[sudo] --net=none -dt $DOCKER_OPTIONS $IMAGE )\n\tCONTAINER=$(docker run -u $UID:$GID --net=none -dt $DOCKER_OPTIONS $IMAGE )\n\tif [ $? != 0 ];then\n\t\techo \"start container image $IMAGE failed\"\n\t\texit 1\n\tfi\n\tCONTAINER=$(expr substr $CONTAINER 1 12)\n\techo $CONTAINER\n}\n\n\nsetupNetns(){\n\t#set symbol link to show up netns\n\tCONTAINER=$1\n\tOPCODE=$2\n\tNAMESPACE=$(docker inspect --format='{{ .State.Pid }}' $CONTAINER)\n\tif [ \"$OPCODE\" = \"add\" ];then\n\t\trun mkdir -p /var/run/netns\n\t\trun ln -s /proc/$NAMESPACE/ns/net /var/run/netns/$NAMESPACE\n\telse\n\t\trun unlink /var/run/netns/$NAMESPACE\n\tfi\n}\n\nfindIntf(){\n\tip -o link |cut -d\\ -f2|grep $1 > /dev/null\n\tif [ $? = 0 ];then\n\t\treturn 1\n\tfi\n\treturn 0\n}\n\ncreateBridge(){\n\tfindIntf $1\n\tif [ $? = 1 ];then\n\t\techo \"already created bridge $1\"\n\t\treturn 1\n\tfi\n\n\trun brctl addbr $1\n\trun ifconfig $1 up\n\treturn 1\n}\n\nmoveHostIntf(){\n\tIFINDEX=$1\n\tBRIDGE=bridge-$IFINDEX\n\n\tbridge link show|cut -d\\ -f 2|grep \"^$IFINDEX\\$\"\n\t#brctl show $BRIDGE | grep \"\\<$IFINDEX\\>\" >/dev/null\n\tif [ $? = 0 ];then\n\t\techo \"host interface $IFINDEX already in bridge $BRIDGE\"\n\t\treturn 1\n\tfi\n\n\trun ip addr flush dev $IFINDEX\n\trun brctl addif $BRIDGE $IFINDEX\n}\n\ngetMacAddr(){\n\tip link show $1|grep -o \"link/ether [^[:space:]]*\"|cut -d\\ -f2\n}\nsetMacAddr(){\n\trun ip link set address $2 dev $1\n}\n\ncloneIntfMacAddress(){\n\taddr=$(getMacAddr $1)\n\techo \"$1/$2: host side address is $addr\"\n\tsetMacAddr $2 $addr\n}\n\nattachToBridge(){\n\tCONTAINER=`expr substr $1 1 6`\n\tIFINDEX=$2\n\tBRIDGE=bridge-$IFINDEX\n\tHOST_IF=h-$IFINDEX-$CONTAINER\n\tGUEST_IF=g-$IFINDEX-$CONTAINER\n\n\tNAMESPACE=$(docker inspect --format='{{ .State.Pid }}' $CONTAINER)\n\tif [ $? != 0 ];then\n\t\techo \"can not find container $CONTAINER\"\n\t\treturn 0\n\tfi\n\n\tfindIntf $BRIDGE\n\tif [ $? 
= 0 ];then\n\t\techo \"bridge $BRIDGE not exist\"\n\t\treturn 0\n\tfi\n\n\tfindIntf $HOST_IF\n\tif [ $? = 1 ];then\n\t\techo \"intf $HOST_IF already exist\"\n\t\treturn 1\n\tfi\n\n\t#create veth pair for container\n\trun ip link add $HOST_IF type veth peer name $GUEST_IF\n\t#cloneIntfMacAddress $HOST_IF $GUEST_IF\n\n\trun brctl addif $BRIDGE $HOST_IF\n\trun ifconfig $HOST_IF up\n\n\t#move interface into container and setup this interface\n\trun ip link set $GUEST_IF netns $NAMESPACE name $IFINDEX\n\n\tsetupNetns $CONTAINER_ID add\n\trun ip netns exec $NAMESPACE ip link set $IFINDEX up\n\t#run ip netns exec $NAMESPACE dhclient $IFINDEX \n\tsetupNetns $CONTAINER_ID remove\n\n\treturn 1\n}\n\nIMAGE=$(docker images -q $IMAGE)\nCONTAINER_ID=$(startContainer $IMAGE)\nif [ $? != 0 ];then\n\texit 1\nfi\ncreateBridge bridge-$IFINDEX\nmoveHostIntf $IFINDEX\nattachToBridge $CONTAINER_ID $IFINDEX\n\n#do some setup for this docker instance such as password, sudoer, etc.\ndocker exec -u root $CONTAINER bash -c \"echo '$USER:$PASSWD'|chpasswd\"\ndocker exec -u root $CONTAINER adduser $USER sudo\n\n#instruction to connect to docker instance console\ndocker exec -it $CONTAINER bash\n\n#instruction to stop and delete containers\n#for i in `docker ps -a|grep \"$IMAGE\"|cut -d\\ -f1|xargs `;do docker stop $i;sudo docker rm $i;done\n\n#docker commit -m \"comments\" $CONTAINER <repo>:<tag>\n\n" }, { "alpha_fraction": 0.6593406796455383, "alphanum_fraction": 0.6739926934242249, "avg_line_length": 53.400001525878906, "blob_id": "35594f14807af125827fe2a2cbabf5862ab0f051", "content_id": "b540474c46f7b5e9b4ff0db2f066b9238ca20df6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 273, "license_type": "no_license", "max_line_length": 95, "num_lines": 5, "path": "/rmdocker.sh", "repo_name": "oysx/dockerUtility", "src_encoding": "UTF-8", "text": "#instruction to stop and delete containers\nfor i in `docker ps -aq|grep \"$1\"|cut -d\\ -f1|xargs `;do docker stop $i;sudo docker rm $i;done\n#for i in `docker ps -aq|grep \"$1\"|cut -d\\ -f1|xargs `;do docker stop $i;done\n\n#docker commit -m \"comments\" $CONTAINER <repo>:<tag>\n\n" }, { "alpha_fraction": 0.5659340620040894, "alphanum_fraction": 0.5879120826721191, "avg_line_length": 21.625, "blob_id": "d2f7ada00ab5f8e0ca85208ad3127b139bc4d725", "content_id": "f7a00fc4d973f655105ef004fcb527f740d06ac8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 364, "license_type": "no_license", "max_line_length": 82, "num_lines": 16, "path": "/loopDocker.sh", "repo_name": "oysx/dockerUtility", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#instruction to iter containers\nop=$1\nif [ \"x$op\" = \"x\" ];then\n\techo \"usage: $0 <operation>\"\n\texit 1\nfi\n\nfor op in $*;do\n\tif [ \"${op:0:1}\" = \"-\" ];then\n\t\tfilter=${op:1};\n\t\tcontinue;\n\tfi\n\t#for i in `docker ps -aq|cut -d\\ -f1|xargs `;do echo \"docker $op $i\";done\n\tfor i in `docker ps -aqf name=$filter |cut -d\\ -f1|xargs `;do docker $op $i;done\ndone\n\n\n" }, { "alpha_fraction": 0.5635725855827332, "alphanum_fraction": 0.566227376461029, "avg_line_length": 31.55246925354004, "blob_id": "aa3b7b1e592a2d4dcbef1f4d302d499b0bfa90d5", "content_id": "9901cf1a55ffa5c7ecb341a3bc60261703c2192d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10547, "license_type": "no_license", "max_line_length": 150, "num_lines": 324, "path": "/docker_image_layer_tool.py", 
"repo_name": "oysx/dockerUtility", "src_encoding": "UTF-8", "text": "#!/bin/env python\n\nimport os\nimport subprocess\nimport json\nimport re\nimport getopt\nimport sys\nimport pprint\nimport tarfile\nimport errno\nimport shutil\n\n\nclass Docker(object):\n def __init__(self, name):\n self.name = name\n self.layers = []\n\n def command(self, cmd):\n out = subprocess.check_output(\n \"docker {} {}\".format(cmd, self.name),\n shell=True\n )\n return out\n\n def layer_list(self):\n info = self.command(\"inspect\")\n info = json.loads(info)\n driver = info[0][\"GraphDriver\"]\n gd = getattr(self, 'layer_list_{}'.format(driver['Name']))\n out = gd(driver['Data'])\n return out\n\n def layer_list_overlay2(self, data):\n layers = data['LowerDir'].split(':')\n layers = [data['UpperDir']] + layers\n self.layers = layers\n return layers\n\n def layer_find(self, name):\n self.layer_list()\n name = name.strip('/')\n for layer in self.layers:\n print(\"-\".center(20, \"-\"))\n path = os.path.join(layer, name)\n whiteout = name.strip('/').split('/')\n parent = os.path.join(*whiteout[:-1])\n if os.path.exists(path):\n print(\"*****{}\".format(layer))\n if os.path.isdir(path):\n print(os.listdir(path))\n else:\n print(path)\n elif os.path.isdir(parent):\n entries = os.listdir(parent)\n out = [entry for entry in entries if re.search(r'\\.wh\\..*{}'.format(whiteout[-1]), entry)]\n if out:\n print(out)\n\n def layer_strip(self, id):\n pass\n\nclass DockerTar(object):\n def __init__(self, directory):\n self.directory = directory\n\n def sync_cmd_with_layer(self):\n history = [(h[\"created_by\"], index) for h, index in zip(self.history, range(len(self.history)))]\n self.cmds = filter(lambda entity: any([entity[0].startswith(cmd) for cmd in [\"RUN\", \"COPY\", \"ADD\", \"/bin/sh -c #(nop) ADD\"]]), history)\n\n def parse(self):\n with open(os.path.join(self.directory, \"manifest.json\")) as f:\n self.manifest = json.load(f)\n self.manifest = self.manifest[0]\n\n config_file = self.manifest['Config']\n self.layers = self.manifest[\"Layers\"]\n with open(os.path.join(self.directory, config_file)) as f:\n self.config = json.load(f)\n self.history = self.config[\"history\"]\n\n self.sync_cmd_with_layer()\n print(\"Total %d layers\" % len(self.layers))\n pprint.pprint(list(enumerate(self.layers)))\n print(\"Total %d commands\" % len(self.cmds))\n pprint.pprint(list(enumerate(self.cmds)))\n\n def is_last_layer(self, num):\n return num == len(self.layers) - 1\n\n def layer_get_json(self, layer):\n layer = os.path.join(self.directory, layer, \"json\")\n with open(layer) as f:\n layer = json.load(f)\n return layer\n\n def layer_set_json(self, layer, content):\n layer = os.path.join(self.directory, layer, \"json\")\n with open(layer, 'w') as f:\n json.dump(content, f)\n\n def change_last_layer(self):\n print(\"Change last layer\")\n lastlayer = self.layer_get_json(self.layer(-1))\n parentlayer = self.layer_get_json(lastlayer[\"parent\"])\n\n layerdiff = {k: lastlayer[k] for k in set(lastlayer) - set(parentlayer)}\n parentlayer.update(layerdiff)\n self.layer_set_json(lastlayer[\"parent\"], parentlayer)\n\n self.config[\"created\"] = parentlayer[\"created\"]\n\n def layer(self, num):\n return os.path.dirname(self.manifest[\"Layers\"][num])\n\n def change_layer(self, num):\n print(\"Change layer\")\n curlayer = self.layer_get_json(self.layer(num))\n lowlayer = self.layer_get_json(self.layer(num+1)) if num+1 < len(self.layers) else None\n if lowlayer:\n lowlayer[\"parent\"] = curlayer.get(\"parent\", None)\n if not 
lowlayer[\"parent\"]:\n del lowlayer[\"parent\"]\n\n with open(os.path.join(self.directory, self.layer(num+1), \"json\"), 'w') as f:\n json.dump(lowlayer, f)\n\n def extract(self, num):\n if self.is_last_layer(num):\n self.change_last_layer()\n\n self.change_layer(num)\n\n rmdir = os.path.dirname(self.layers[num])\n shutil.rmtree(os.path.join(self.directory, rmdir))\n del self.layers[num]\n del self.config[\"rootfs\"][\"diff_ids\"][num]\n del self.config[\"history\"][self.cmds[num][1]]\n\n with open(os.path.join(self.directory, \"manifest.json\"), 'w') as f:\n json.dump([self.manifest], f)\n\n with open(os.path.join(self.directory, self.manifest[\"Config\"]), 'w') as f:\n json.dump(self.config, f)\n\ng_params = None\ndef docker_tar(docker):\n tmpdir = \"/tmp/vidocker/\"\n try:\n os.makedirs(tmpdir)\n except OSError as e:\n if errno.EEXIST != e.errno:\n raise\n\n tardir = os.path.join(tmpdir, docker.name.replace(\"/\",\"_\"))\n tarname = os.path.join(tardir + \".tar\")\n\n if not os.path.exists(tarname):\n print(\"Starting to save docker image to %s\" % tarname)\n output = subprocess.check_output(\"docker save -o {} {}\".format(tarname, docker.name), shell=True)\n print(output)\n print(\"created tar file %s\" % tarname)\n\n if not os.path.exists(tardir):\n print(\"Starting to extract tar file\")\n with tarfile.open(tarname) as f:\n f.extractall(tardir)\n\n mytar = DockerTar(tardir)\n mytar.parse()\n return mytar\n\ndef docker_extract(docker):\n mytar = docker_tar(docker)\n mytar.extract(int(g_params))\n\n\ndef docker_list(docker):\n out = docker.layer_list()\n pprint.pprint(out)\n\ndef docker_search(docker):\n docker.layer_find(g_params)\n\ndef docker_walk(docker):\n global g_params\n out = docker.layer_list()\n for index, layer in enumerate(out):\n print(\"[{}]\".format(index) + \"-\".center(20, \"-\"))\n try:\n cmd = g_params.format(layer)\n # print(\"command: {}\".format(cmd))\n output = subprocess.check_output(cmd, shell=True)\n print(output)\n except:\n pass\n\nclass ViPath(object):\n def __getattr__(self, item):\n func = getattr(os.path, item)\n return func\n\n @staticmethod\n def join(path, *paths):\n new_paths = []\n for p in paths:\n new_paths += [p.strip(os.path.sep)]\n return getattr(os.path, 'join')(path, *new_paths)\n\nvipath = ViPath()\n\ndef docker_raw(docker):\n global g_params\n path = g_params\n out = docker.layer_list()\n outer_path = None\n for index, layer in enumerate(out):\n if os.path.exists(vipath.join(layer, path)):\n inner_path = vipath.dirname(path)\n outer_path = vipath.join(layer, inner_path)\n print(\"Found {} in [{}]:{}\".format(path, index, outer_path))\n break\n\n if not outer_path:\n print(\"Can't find %s\" % path)\n sys.exit()\n\n cmd = \"docker run -dt --rm --entrypoint='' -v {outer_path}:{inner_path} {id} /bin/bash\".format(\n outer_path=outer_path,\n inner_path=inner_path,\n id=docker.name)\n print(\"Run command: %s\" % cmd)\n out = subprocess.check_output(cmd, shell=True)\n print(\"Started container: %s\" % out)\n\n\ndef docker_diff(docker):\n global g_params\n base = docker.layer_list()\n base.reverse()\n docker_a = Docker(g_params)\n another = docker_a.layer_list()\n another.reverse()\n\n print(\"{}\\t -- {}\".format(docker.name, docker_a.name))\n common = [b for b, a in zip(base, another) if b == a]\n for c in common:\n print(\"***{}\".format(c))\n\n b = base[len(common):]\n a = another[len(common):]\n for i in range(max(len(a), len(b))):\n try:\n show = b[i]\n except:\n show = \"<null>\"\n\n try:\n show += \" -- \" + a[i]\n except:\n show += \" -- 
\" + \"<null>\"\n\n print(show)\n\ndef help():\n print(\"Usage: [options] <docker ID or name>\")\n \"help\", \"raw\", \"extract\", \"tar\", \"walk\", \"list\", \"diff\", \"search\"\n print(\"\\t-h\\t:help\")\n print(\"\\t-r <path>\\t:'path' is absolute path in the container. We find the upmost layer contain it and mount it into\"\n \"container to modify it's content directly skipping overlay filesystem\")\n print(\"\\t-x <layer index>\\t:'layer index' is the numeric number of the image's layer which we want to strip from the image\")\n print(\"\\t-t\\t:Save the image into tar file and show each layer's ID and command\")\n print(\"\\t-w <command>\\t:Iterate each layer and execute the <command> on the layer's path. You can use '{}' stands for this path\")\n print(\"\\t-l\\t:List all layers ID\")\n print(\"\\t-d <another image/container ID>\\t:Compare two images/containers layers, for same layer with '*' prefix indicator\")\n print(\"\\t-s <pattern>\\t:Search <pattern> on each layer\")\n\n sys.exit()\n\n'''\nExample:\n# fo findout all \"trusted.overlay.opaque\" xattribute directories which is another whiteout mechanism for directories in overlay2 filesystem\n./test.py -w 'getfattr --absolute-names -R -m \"\" -d {} 2>/dev/null | grep -B3 trusted.overlay.opaque 2>/dev/null' <containerID or imageID> 2>/dev/null\n# to findout all character device with 0:0 deviceID which is the whiteout mechanism for overlay2 filesystem\n./test.py -w 'find {} -type c'\n'''\nif __name__ == \"__main__\":\n if len(sys.argv) <= 1:\n print(\"Usage: %s <dockerID>\" % sys.argv[0])\n sys.exit()\n\n try:\n options, args = getopt.getopt(sys.argv[1:], \"hr:x:tw:ld:s:\", [\"help\", \"raw\", \"extract\", \"tar\", \"walk\", \"list\", \"diff\", \"search\"])\n except getopt.GetoptError:\n sys.exit()\n\n for k, v in options:\n if k in (\"-l\", \"--list\"):\n opcode = docker_list\n elif k in (\"-w\", \"--walk\"):\n opcode = docker_walk\n g_params = v\n elif k in (\"-d\", \"--diff\"):\n opcode = docker_diff\n g_params = v\n elif k in (\"-s\", \"--search\"):\n opcode = docker_search\n g_params = v\n elif k in (\"-t\", \"--tar\"):\n opcode = docker_tar\n elif k in (\"-x\", \"--extract\"):\n opcode = docker_extract\n g_params = v\n elif k in (\"-r\", \"--raw\"):\n opcode = docker_raw\n g_params = v\n elif k in (\"-h\", \"--help\"):\n help()\n else:\n help()\n \n docker = Docker(args[0])\n opcode(docker)\n" }, { "alpha_fraction": 0.7221134901046753, "alphanum_fraction": 0.7514677047729492, "avg_line_length": 29.058822631835938, "blob_id": "6d1633e2ba83273cb3ceab90ca8bca3339610bfd", "content_id": "eb6e923d721c52a997d643192401f15d09d30a08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 511, "license_type": "no_license", "max_line_length": 66, "num_lines": 17, "path": "/zkClient.sh", "repo_name": "oysx/dockerUtility", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nID=$(docker run -dt ubuntu:14.04.3)\n\n#NS=$(docker inspect --format='{{.State.Pid}}' $ID)\n#ip link add zkClient.host type veth peer name zkClient.guest\n#ifconfig zkClient.host up\n#ip link set zkClient.guest netns $NS name eth1\n#brctl addbr zkClient.br\n#brctl addif zkClient.br zkClient.host\n#ifconfig zkClient.br up\n\ndocker network create -d bridge --subnet 172.25.0.0/16 zkClient.br\ndocker network connect zkClient.br $ID\n#docker network inspect zkClient.br\n\ndocker exec -it --privileged $ID bash\n" }, { "alpha_fraction": 0.6473127007484436, "alphanum_fraction": 0.6649666428565979, "avg_line_length": 
19.392000198364258, "blob_id": "fb9d13703b4c72c456db95870c59668c61b3a8e1", "content_id": "756eddc97cfc62c3f031e392ba1617e0f7dd8a0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2549, "license_type": "no_license", "max_line_length": 96, "num_lines": 125, "path": "/netUtility.sh", "repo_name": "oysx/dockerUtility", "src_encoding": "UTF-8", "text": "run(){\n\techo -n \"$*\"\n\tbash -c \"$*\"\n\techo \"==> $?\"\n}\n\nstartContainer(){\n\tIMAGE=$1\n\t#CONTAINER=$(docker run -u $UID:$GID --group-add=[sudo] --net=none -dt $DOCKER_OPTIONS $IMAGE )\n\tCONTAINER=$(docker run -u $UID:$GID --net=none -dt $DOCKER_OPTIONS $IMAGE )\n\tif [ $? != 0 ];then\n\t\techo \"start container image $IMAGE failed\"\n\t\texit 1\n\tfi\n\tCONTAINER=$(expr substr $CONTAINER 1 12)\n\techo $CONTAINER\n}\n\n\nsetupNetns(){\n\t#set symbol link to show up netns\n\tCONTAINER=$1\n\tOPCODE=$2\n\tNAMESPACE=$(docker inspect --format='{{ .State.Pid }}' $CONTAINER)\n\tif [ \"$OPCODE\" = \"add\" ];then\n\t\trun mkdir -p /var/run/netns\n\t\trun ln -s /proc/$NAMESPACE/ns/net /var/run/netns/$NAMESPACE\n\telse\n\t\trun unlink /var/run/netns/$NAMESPACE\n\tfi\n}\n\nfindIntf(){\n\tip -o link |cut -d\\ -f2|grep $1 > /dev/null\n\tif [ $? = 0 ];then\n\t\treturn 1\n\tfi\n\treturn 0\n}\n\ncreateBridge(){\n\tfindIntf $1\n\tif [ $? = 1 ];then\n\t\techo \"already created bridge $1\"\n\t\treturn 1\n\tfi\n\n\trun brctl addbr $1\n\trun ifconfig $1 up\n\treturn 1\n}\n\nmoveHostIntf(){\n\tIFINDEX=$1\n\tBRIDGE=bridge-$IFINDEX\n\n\tbridge link show|cut -d\\ -f 2|grep \"^$IFINDEX\\$\"\n\t#brctl show $BRIDGE | grep \"\\<$IFINDEX\\>\" >/dev/null\n\tif [ $? = 0 ];then\n\t\techo \"host interface $IFINDEX already in bridge $BRIDGE\"\n\t\treturn 1\n\tfi\n\n\trun ip addr flush dev $IFINDEX\n\trun brctl addif $BRIDGE $IFINDEX\n}\n\ngetMacAddr(){\n\tip link show $1|grep -o \"link/ether [^[:space:]]*\"|cut -d\\ -f2\n}\nsetMacAddr(){\n\trun ip link set address $2 dev $1\n}\n\ncloneIntfMacAddress(){\n\taddr=$(getMacAddr $1)\n\techo \"$1/$2: host side address is $addr\"\n\tsetMacAddr $2 $addr\n}\n\nattachToBridge(){\n\tCONTAINER=`expr substr $1 1 6`\n\tIFINDEX=$2\n\tBRIDGE=bridge-$IFINDEX\n\tHOST_IF=h-$IFINDEX-$CONTAINER\n\tGUEST_IF=g-$IFINDEX-$CONTAINER\n\n\tNAMESPACE=$(docker inspect --format='{{ .State.Pid }}' $CONTAINER)\n\tif [ $? != 0 ];then\n\t\techo \"can not find container $CONTAINER\"\n\t\treturn 0\n\tfi\n\n\tfindIntf $BRIDGE\n\tif [ $? = 0 ];then\n\t\techo \"bridge $BRIDGE not exist\"\n\t\treturn 0\n\tfi\n\n\tfindIntf $HOST_IF\n\tif [ $? = 1 ];then\n\t\techo \"intf $HOST_IF already exist\"\n\t\treturn 1\n\tfi\n\n\t#create veth pair for container\n\trun ip link add $HOST_IF type veth peer name $GUEST_IF\n\t#cloneIntfMacAddress $HOST_IF $GUEST_IF\n\n\trun brctl addif $BRIDGE $HOST_IF\n\trun ifconfig $HOST_IF up\n\n\t#move interface into container and setup this interface\n\trun ip link set $GUEST_IF netns $NAMESPACE name $IFINDEX\n\n\tsetupNetns $CONTAINER_ID add\n\trun ip netns exec $NAMESPACE ip link set $IFINDEX up\n\t#run ip netns exec $NAMESPACE dhclient $IFINDEX \n\tsetupNetns $CONTAINER_ID remove\n\n\treturn 1\n}\n\n\tsetupNetns $1 add\n\t#setupNetns $1 remove\n" } ]
6
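The docker_image_layer_tool.py file in the record above resolves an image's filesystem layers by parsing `docker inspect` output: for the overlay2 storage driver, `GraphDriver.Data` carries `UpperDir` (the top, writable layer) and a `:`-separated `LowerDir` chain. A minimal standalone Python sketch of that technique follows, assuming a local Docker daemon using the overlay2 driver; the `overlay2_layers` name and the `sys.argv[1]` command-line interface are illustrative additions, not part of the archived tool:

import json
import subprocess
import sys

def overlay2_layers(name):
    # "docker inspect <image-or-container>" prints a JSON array with one
    # entry per object; GraphDriver.Data holds the overlay2 directories.
    out = subprocess.check_output(["docker", "inspect", name])
    info = json.loads(out)[0]
    driver = info["GraphDriver"]
    if driver["Name"] != "overlay2":
        raise RuntimeError("expected overlay2 driver, got %r" % driver["Name"])
    data = driver["Data"]
    # UpperDir is the topmost layer; LowerDir (absent for single-layer
    # images) lists the remaining layers, topmost first, ':'-separated.
    layers = [data["UpperDir"]]
    if data.get("LowerDir"):
        layers += data["LowerDir"].split(":")
    return layers

if __name__ == "__main__":
    for i, layer in enumerate(overlay2_layers(sys.argv[1])):
        print("[%d] %s" % (i, layer))

This is the same layer list the archived tool walks when searching each layer for a given path or for overlay whiteout markers (character devices with device number 0:0).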
Suraj-Jena/ML
https://github.com/Suraj-Jena/ML
3b5def915b84e321699af14c38783b1f3a2ae088
36c1b4750eb917d883c9717ae953b6570015144f
257bda5fa015fcc6ac269fac45777ad6e4642946
refs/heads/master
2021-05-05T14:08:07.617041
2017-11-21T11:56:42
2017-11-21T11:56:42
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6059364676475525, "alphanum_fraction": 0.6308993101119995, "avg_line_length": 45.14921188354492, "blob_id": "d6b826fbb22528ac4eb5d277039136bb74f52990", "content_id": "40a2ce870b23f7aceddfb3b36fd71f26cb5ff6e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49794, "license_type": "no_license", "max_line_length": 294, "num_lines": 1079, "path": "/python_files/final_is_remittance_model_4_files.py", "repo_name": "Suraj-Jena/ML", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport random\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nimport re\nfrom sklearn import model_selection\nimport string\nimport numpy as np\nimport sklearn\nfrom sklearn import linear_model, datasets,tree\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error,accuracy_score\nfrom matplotlib import pyplot as plt\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import RandomForestClassifier,RandomForestRegressor\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn import metrics\nfrom sklearn import ensemble\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\nfrom dateutil.parser import parse\n\n\n\n####################################################################################################\n\n\ndata=pd.read_csv(r\"C:\\Users\\shubham.kamal\\Desktop\\LITM\\Not_Success_rows_ver_clean.csv\", sep=',',encoding='cp1256')\ndata2=pd.read_csv(r\"C:\\Users\\shubham.kamal\\Desktop\\LITM\\Success_rows_final1.csv\", sep=',',encoding='cp1256',low_memory=False)\n\n#data = data[data['page_type_final'] == 'remittance']\n#data2=data2[data2['page_pageType'] == 'REMITTANCE_PAGE']\ndata.loc[data['page_type_final']=='check','page_type']=0\ndata.loc[data['page_type_final']=='envelope','page_type']=1\ndata.loc[(data['page_type_final']!='check') & (data['page_type_final']!='envelope'),'page_type']=2\ndata=data.reset_index(drop=True)\n\ndata3=data.append(data2,ignore_index=True)\ndata3['row_noOfCharacters']=pd.cut(data3['row_noOfCharacters'],bins=10).cat.codes\ndata3=data3.reset_index(drop=True)\n\ndata=data.reset_index(drop=True)\ndata2=data2.reset_index(drop=True)\ndata['row_noOfCharacters']=data3['row_noOfCharacters'].loc[:data.shape[0]-1].reset_index(drop=True)\ndata2['row_noOfCharacters']=data3['row_noOfCharacters'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\ndata=data.reset_index(drop=True)\ndata2=data2.reset_index(drop=True)\n\ndata['remittance_result']=0\nfor i in data['check_checkNumber'].unique():\n for j in data[data['check_checkNumber']==i]['page_pageNumber'].unique():\n temp=pd.DataFrame()\n temp=data[(data['check_checkNumber']==i) & (data['page_pageNumber']==j)]\n temp.reset_index(drop=True,inplace=True)\n first_row=1\n last_row=temp.at[temp.shape[0]-1,'row_rowNumber']\n df=pd.DataFrame()\n df=temp[temp['is_total_final']==1]\n df.sort_values('row_rowNumber', inplace=True)\n df=df.reset_index(drop=True)\n df2 = pd.DataFrame()\n df2 = temp[temp['is_heading'] == 1]\n df2.sort_values('row_rowNumber', inplace=True)\n df2 = df2.reset_index(drop=True)\n if ((df.empty) & (df2.empty)):\n continue\n if df.empty:\n total_row_number=last_row\n else:\n 
total_row_number=df.reset_index(drop=True).at[df.shape[0]-1,'row_rowNumber']\n data.loc[(data['check_checkNumber'] == i) & (data['page_pageNumber'] == j) & (data['row_rowNumber']== total_row_number), 'remittance_result'] = 1\n if df2.empty:\n heading_row_number=first_row\n else:\n heading_row_number=df2.reset_index(drop=True).at[0,'row_rowNumber']\n data.loc[(data['check_checkNumber'] == i) & (data['page_pageNumber'] == j) & (data['row_rowNumber']<= heading_row_number),'remittance_result']=0\n data.loc[(data['check_checkNumber'] == i) & (data['page_pageNumber'] == j) & ((data['row_rowNumber'] > heading_row_number) & (data['row_rowNumber'] < total_row_number)), 'remittance_result'] = 1\n data.loc[(data['check_checkNumber'] == i) & (data['page_pageNumber'] == j) & (data['row_rowNumber'] >= total_row_number), 'remittance_result'] = 0\n\ndata2['remittance_result']=0\nfor i in data2['check_checkNumber'].unique():\n for j in data2[data2['check_checkNumber']==i]['page_pageNumber'].unique():\n temp=pd.DataFrame()\n temp=data2[(data2['check_checkNumber']==i) & (data2['page_pageNumber']==j)]\n temp.reset_index(drop=True,inplace=True)\n first_row=1\n last_row=temp.at[temp.shape[0]-1,'row_rowNumber']\n df = pd.DataFrame()\n df = temp[temp['is_total_final'] == 1]\n df.sort_values('row_rowNumber', inplace=True)\n df = df.reset_index(drop=True)\n df2 = pd.DataFrame()\n df2 = temp[temp['is_heading'] == 1]\n df2.sort_values('row_rowNumber', inplace=True)\n df2 = df2.reset_index(drop=True)\n if ((df.empty) & (df2.empty)):\n continue\n if df.empty:\n total_row_number=last_row\n data2.loc[(data2['check_checkNumber'] == i) & (data2['page_pageNumber'] == j) & (\n data2['row_rowNumber'] == total_row_number), 'remittance_result'] = 1\n else:\n\n total_row_number=df.reset_index(drop=True).at[df.shape[0]-1,'row_rowNumber']\n if df2.empty:\n heading_row_number=first_row\n else:\n heading_row_number=df2.reset_index(drop=True).at[0,'row_rowNumber']\n data2.loc[(data2['check_checkNumber'] == i) & (data2['page_pageNumber'] == j) & (data2['row_rowNumber']<= heading_row_number),'remittance_result']=0\n data2.loc[(data2['check_checkNumber'] == i) & (data2['page_pageNumber'] == j) & ((data2['row_rowNumber'] > heading_row_number) & (data2['row_rowNumber'] < total_row_number)), 'remittance_result'] = 1\n data2.loc[(data2['check_checkNumber'] == i) & (data2['page_pageNumber'] == j) & (data2['row_rowNumber'] >= total_row_number), 'remittance_result'] = 0\n\n\ndata.loc[data['page_type']!=2,'remittance_result']=0\ndata2.loc[data2['page_type']!=2,'remittance_result']=0\n\nfor i in range(0,data3.shape[0]):\n s=data3.at[i,'row_string']\n digits = sum(c.isdigit() for c in s)\n letters = sum(c.isalpha() for c in s)\n spaces = sum(c.isspace() for c in s)\n others = len(s) - digits - letters - spaces\n\n total_charac=digits+letters+others\n data3.at[i,'total_digits'] = digits/total_charac*100\n data3.at[i, 'total_letters'] = letters/total_charac*100\n data3.at[i, 'total_spaces'] = spaces/total_charac*100\n data3.at[i, 'total_others'] = 
others/total_charac*100\n\ndata3['total_digits_coded']=pd.cut(data3['total_digits'],bins=10).cat.codes\ndata3['total_letters_coded']=pd.cut(data3['total_letters'],bins=10).cat.codes\ndata3['total_spaces_coded']=pd.cut(data3['total_spaces'],bins=10).cat.codes\ndata3['total_others_coded']=pd.cut(data3['total_others'],bins=10).cat.codes\ndata3=data3.reset_index(drop=True)\n\n\ndata=data.reset_index(drop=True)\ndata2=data2.reset_index(drop=True)\ndata['total_digits_coded']=data3['total_digits_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\ndata2['total_digits_coded']=data3['total_digits_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\ndata=data.reset_index(drop=True)\ndata2=data2.reset_index(drop=True)\n\ndata['total_letters_coded']=data3['total_letters_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\ndata2['total_letters_coded']=data3['total_letters_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\ndata=data.reset_index(drop=True)\ndata2=data2.reset_index(drop=True)\n\ndata['total_spaces_coded']=data3['total_spaces_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\ndata2['total_spaces_coded']=data3['total_spaces_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\ndata=data.reset_index(drop=True)\ndata2=data2.reset_index(drop=True)\n\ndata['total_others_coded']=data3['total_others_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\ndata2['total_others_coded']=data3['total_others_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\ndata=data.reset_index(drop=True)\ndata2=data2.reset_index(drop=True)\n\n\ndf3=pd.DataFrame()\ndf3=data2.reset_index(drop=True)\n\n\ndata['ratio_row_section']=data['row_noOfCharacters']/data['section_noOfCharacters']\ndf3['ratio_row_section']=df3['row_noOfCharacters']/df3['section_noOfCharacters']\n\n\ndata['amount_col_man']=0\nfor i in range(0,data.shape[0]):\n s=data.at[i,'row_string']\n if '$' in s:\n data.at[i, 'amount_col_man'] = 1\n s = s.replace(',', '')\n s=s.replace('$',' ')\n digits=re.findall(r\"\\s+\\d+\\.\\d+$|\\s+\\d+\\.\\d+\\s+\", s,flags=re.MULTILINE)\n for j in digits:\n if float(j)<=data.at[i,'check_checkAmount']:\n data.at[i,'amount_col_man']=1\n break\n\n\ndf3['amount_col_man']=0\nfor i in range(0,df3.shape[0]):\n s=df3.at[i,'row_string']\n if '$' in s:\n df3.at[i, 'amount_col_man'] = 1\n s = s.replace(',', '')\n s = s.replace('$', ' ')\n digits=re.findall(r\"\\s+\\d+\\.\\d+$|\\s+\\d+\\.\\d+\\s+\", s,flags=re.MULTILINE)\n for j in digits:\n if float(j)<=df3.at[i,'check_checkAmount']:\n df3.at[i,'amount_col_man']=1\n break\n\n\npattern=re.compile(\"Jan(uary)?|Feb(ruary)?|Mar(ch)?|Apr(il)?|May|Jun(e)?|Jul(y)?|Aug(ust)?|Sep(tember)?|Oct(ober)?|Nov(ember)?|Dec(ember)?\\s+\\d{1,2}[,/.]\\s+\\d{4}([0-3]?[0-9][.|/][0-1]?[0-9][.|/](([0-9]{4})|([0-9]{2})))|([0-1]?[0-9][.|/][0-3]?[0-9][.|/](([0-9]{4})|([0-9]{2})))\",re.IGNORECASE)\n\n\ndef dateFlag(x):\n global pattern\n if pattern.search(str(x)) is not None:\n return 1\n else:\n return 0\n\n\ndata['date_flag'] = data['row_string'].apply(dateFlag)\ndf3['date_flag'] = df3['row_string'].apply(dateFlag)\n\n# data['date_amt_combined']=0\n# df3['date_amt_combined']=0\n# data.loc[(data['date_flag']==1) & (data['amount_col_man']==1),'date_amt_combined']=1\n# data.loc[(data['date_flag']==0) & (data['amount_col_man']==1),'date_amt_combined']=1\n# data.loc[(data['date_flag']==1) & (data['amount_col_man']==0),'date_amt_combined']=0\n# data.loc[(data['date_flag']==0) & (data['amount_col_man']==0),'date_amt_combined']=0\n#\n# 
df3.loc[(df3['date_flag']==1) & (df3['amount_col_man']==1),'date_amt_combined']=1\n# df3.loc[(df3['date_flag']==0) & (df3['amount_col_man']==1),'date_amt_combined']=1\n# df3.loc[(df3['date_flag']==1) & (df3['amount_col_man']==0),'date_amt_combined']=0\n# df3.loc[(df3['date_flag']==0) & (df3['amount_col_man']==0),'date_amt_combined']=0\n\n\n\ndef is_date(string):\n try:\n parse(string)\n return 1\n except ValueError:\n return 0\n\nfor i in range(0,df3.shape[0]):\n s=df3.at[i,'row_string']\n words=s.split()\n for j in words:\n try:\n if is_date(j)==1:\n df3.at[i, 'date_flag'] = 1\n break\n except OverflowError:\n continue\n\nfor i in range(0,data.shape[0]):\n s=data.at[i,'row_string']\n words=s.split()\n for j in words:\n try:\n if is_date(j)==1:\n data.at[i, 'date_flag'] = 1\n break\n except OverflowError:\n continue\n\n\n\n# X=data[data['page_type_final']=='remittance'][['date_flag','amount_col_man','ratio_row_section','row_noOfCharacters','remittance_result','total_digits_coded','row_JosasisLRCoordinates_left','row_JosasisLRCoordinates_right','row_distanceFromLeft','row_distanceFromTop']]\n# Y= data[data['page_type_final']=='remittance']['is_remittance_final'].reset_index(drop=True)\n\n# X=pd.DataFrame()\n# Y=pd.DataFrame()\n# count=0\n# for i in data['check_checkNumber'].unique():\n# X=data[data['']]\n\ndef function_threshold(predictions, predictions_prob, Y_validation, thresh_list):\n output_classes_ = Y_validation.unique()\n combined_predictions = pd.DataFrame(\n {'pred_label': predictions, 'actual_label': Y_validation, 'prob_0': predictions_prob[:, 0],\n 'prob_1': predictions_prob[:, 1]})\n combined_predictions['max_prob'] = combined_predictions[['prob_0', 'prob_1']].max(axis=1)\n final_threshold_predictions = pd.DataFrame()\n\n for class_ in output_classes_:\n thresh = thresh_list[class_]\n pred_class_ = combined_predictions[combined_predictions['pred_label'] == class_]\n pred_class_thresh_ = pred_class_[pred_class_['max_prob'] > thresh]\n final_threshold_predictions = final_threshold_predictions.append(pred_class_thresh_)\n\n return final_threshold_predictions, final_threshold_predictions.loc[:,\n 'actual_label'], final_threshold_predictions.loc[:,\n 'pred_label'], final_threshold_predictions.loc[:,\n ['prob_0', 'prob_1']]\n\n\ndef print_metrics(Y_validation, predictions, predictions_prob=None):\n print(\"Accuracy \", accuracy_score(Y_validation, predictions))\n print(confusion_matrix(Y_validation, predictions))\n print(classification_report(Y_validation, predictions))\n if predictions_prob is not None:\n print(\"Log loss\", sklearn.metrics.log_loss(Y_validation, predictions_prob))\n\ncols=['page_type','date_flag','amount_col_man','ratio_row_section','row_noOfCharacters','remittance_result','total_digits_coded','row_JosasisLRCoordinates_left','row_JosasisLRCoordinates_right','row_distanceFromLeft','row_distanceFromTop']\nX_train=data[cols]\nX_validation=df3[cols]\nY_train = data['is_remittance_final'].reset_index(drop=True)\nY_validation = df3['is_remittance_final'].reset_index(drop=True)\n\nrfc = RandomForestClassifier(n_estimators=300)\nrfc.fit(X_train, Y_train)\npredictions = rfc.predict(X_validation)\npredictions_prob=rfc.predict_proba(X_validation)\nprint(accuracy_score(Y_validation, predictions))\nprint(confusion_matrix(Y_validation, predictions))\nprint(classification_report(Y_validation, predictions))\n#\n# thresh=[0.1,0.2,0.3,0.4]\n# for i in thresh:\n# combined_predictions ,threshold_actual , threshold_predictions , 
threshold_prob=function_threshold(predictions,predictions_prob,Y_validation,thresh_list=[0.5,i])\n# print(\"Lost samples:\", 1 - (len(threshold_predictions)/len(predictions)))\n# print_metrics(threshold_actual,threshold_predictions,threshold_prob)\n# print(0.5,i)\n# print(\"\\n\",\"\\n\")\n# print('******************************************',\"\\n\")\n#\n# thresh=[0.9,0.8,0.7,0.6]\n# for i in thresh:\n# combined_predictions ,threshold_actual , threshold_predictions , threshold_prob=function_threshold(predictions,predictions_prob,Y_validation,thresh_list=[i,0.5])\n# print(\"Lost samples:\", 1 - (len(threshold_predictions)/len(predictions)))\n# print_metrics(threshold_actual,threshold_predictions,threshold_prob)\n# print(i,0.5)\n# print(\"\\n\",\"\\n\")\n# print('******************************************',\"\\n\")\n# combined_predictions ,threshold_actual , threshold_predictions , threshold_prob=function_threshold(predictions,predictions_prob,Y_validation,thresh_list=[0.7,0.1])\n# print(\"Lost samples:\", 1 - (len(threshold_predictions)/len(predictions)))\n# print_metrics(threshold_actual,threshold_predictions,threshold_prob)\n\n# temp=pd.DataFrame()\n# temp=data[data['page_type_final']=='others'][['date_flag','amount_col_man','ratio_row_section','row_noOfCharacters','remittance_result','total_digits_coded','row_JosasisLRCoordinates_left','row_JosasisLRCoordinates_right','row_distanceFromLeft','row_distanceFromTop']]\n# temp2=pd.DataFrame()\n# temp2=data[data['page_type_final']=='others']['is_remittance_final'].reset_index(drop=True)\n# X_validation=X_validation.append(temp,ignore_index=True)\n# Y_validation=Y_validation.append(temp2,ignore_index=True)\n# X_validation=X_validation.reset_index(drop=True)\n# Y_validation=Y_validation.reset_index(drop=True)\n# predictions = rfc.predict(X_validation)\n# print(accuracy_score(Y_validation, predictions))\n# print(confusion_matrix(Y_validation, predictions))\n# print(classification_report(Y_validation, predictions))\n\ndf3['predictions']=predictions\ndf3=df3[['ratio_row_section','total_digits_coded','page_type','check_checkAmount','check_checkNumber','page_pageNumber','row_rowNumber','row_string','amount_col_man','date_flag','remittance_result','is_heading','is_total_final','is_remittance_final','predictions','ocr_filepath']]\ndf3.to_csv(\"C:\\\\Users\\\\shubham.kamal\\\\Desktop\\\\LITM\\\\success_2.csv\")\nprint(predictions_prob)\n#\ndf3=df3.reset_index(drop=True)\nto_check=pd.DataFrame()\ncount_original=0\ncount_predicted=0\nfor i in df3['check_checkNumber'].unique():\n for j in df3[df3['check_checkNumber']==i]['page_pageNumber'].unique():\n temp=pd.DataFrame()\n temp=df3[(df3['check_checkNumber']==i) & (df3['page_pageNumber']==j)]\n temp.reset_index(drop=True,inplace=True)\n count_original = temp[temp['is_remittance_final']==1].shape[0]\n count_predicted = temp[temp['predictions']==1].shape[0]\n if count_original!=count_predicted:\n print(count_original,count_predicted)\n to_check=to_check.append(temp,ignore_index=True)\n to_check=to_check.reset_index(drop=True)\n\nto_check.to_csv(\"C:\\\\Users\\\\shubham.kamal\\\\Desktop\\\\LITM\\\\to_check.csv\")\n\n\n#\n#\n#\n# ###############################################################################################\n#\n#\n# data=pd.read_csv(r\"C:\\Users\\shubham.kamal\\Desktop\\LITM\\Not_Success_rows_ver_clean.csv\", sep=',',encoding='cp1256')\n# data2=pd.read_csv(r\"C:\\Users\\shubham.kamal\\Desktop\\LITM\\toKamal-1.4_not.csv\", sep=',',encoding='cp1256',low_memory=False)\n#\n# data = 
data[data['page_type_final'] == 'remittance']\n#\n#\n# data3=data.append(data2,ignore_index=True)\n# data3['row_noOfCharacters']=pd.cut(data3['row_noOfCharacters'],bins=10).cat.codes\n# data3=data3.reset_index(drop=True)\n#\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n# data['row_noOfCharacters']=data3['row_noOfCharacters'].loc[:data.shape[0]-1].reset_index(drop=True)\n# data2['row_noOfCharacters']=data3['row_noOfCharacters'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n#\n#\n# for i in data['check_checkNumber'].unique():\n# for j in data[data['check_checkNumber']==i]['page_pageNumber'].unique():\n# temp=pd.DataFrame()\n# temp=data[(data['check_checkNumber']==i) & (data['page_pageNumber']==j)]\n# temp.reset_index(drop=True,inplace=True)\n# first_row=1\n# last_row=temp.at[temp.shape[0]-1,'row_rowNumber']\n# df=pd.DataFrame()\n# df=temp[temp['is_total_final']==1]\n# if df.empty:\n# total_row_number=last_row\n# else:\n# total_row_number=df.reset_index(drop=True).at[0,'row_rowNumber']\n# df2=pd.DataFrame()\n# df2=temp[temp['is_heading']==1]\n# if df2.empty:\n# heading_row_number=first_row\n# else:\n# heading_row_number=df2.reset_index(drop=True).at[0,'row_rowNumber']\n# data.loc[(data['check_checkNumber'] == i) & (data['page_pageNumber'] == j) & (data['row_rowNumber']<= heading_row_number),'remittance_result']=0\n# data.loc[(data['check_checkNumber'] == i) & (data['page_pageNumber'] == j) & ((data['row_rowNumber'] > heading_row_number) & (data['row_rowNumber'] < total_row_number)), 'remittance_result'] = 1\n# data.loc[(data['check_checkNumber'] == i) & (data['page_pageNumber'] == j) & (data['row_rowNumber'] >= total_row_number), 'remittance_result'] = 0\n#\n# for i in data2['check_checkNumber'].unique():\n# for j in data2[data2['check_checkNumber']==i]['page_pageNumber'].unique():\n# temp=pd.DataFrame()\n# temp=data2[(data2['check_checkNumber']==i) & (data2['page_pageNumber']==j)]\n# temp.reset_index(drop=True,inplace=True)\n# first_row=1\n# last_row=temp.at[temp.shape[0]-1,'row_rowNumber']\n# df=pd.DataFrame()\n# df=temp[temp['is_total_final']==1]\n# if df.empty:\n# total_row_number=last_row\n# else:\n# total_row_number=df.reset_index(drop=True).at[0,'row_rowNumber']\n# df2=pd.DataFrame()\n# df2=temp[temp['is_heading']==1]\n# if df2.empty:\n# heading_row_number=first_row\n# else:\n# heading_row_number=df2.reset_index(drop=True).at[0,'row_rowNumber']\n# data2.loc[(data2['check_checkNumber'] == i) & (data2['page_pageNumber'] == j) & (data2['row_rowNumber']<= heading_row_number),'remittance_result']=0\n# data2.loc[(data2['check_checkNumber'] == i) & (data2['page_pageNumber'] == j) & ((data2['row_rowNumber'] > heading_row_number) & (data2['row_rowNumber'] < total_row_number)), 'remittance_result'] = 1\n# data2.loc[(data2['check_checkNumber'] == i) & (data2['page_pageNumber'] == j) & (data2['row_rowNumber'] >= total_row_number), 'remittance_result'] = 0\n#\n#\n# for i in range(0,data3.shape[0]):\n# s=data3.at[i,'row_string']\n# digits = sum(c.isdigit() for c in s)\n# letters = sum(c.isalpha() for c in s)\n# spaces = sum(c.isspace() for c in s)\n# others = len(s) - digits - letters - spaces\n#\n# total_charac=digits+letters+spaces+others\n# data3.at[i,'total_digits'] = digits/total_charac*100\n# data3.at[i, 'total_letters'] = letters/total_charac*100\n# data3.at[i, 'total_spaces'] = spaces/total_charac*100\n# data3.at[i, 'total_others'] = others/total_charac*100\n#\n# 
data3['total_digits_coded']=pd.cut(data3['total_digits'],bins=10).cat.codes\n# data3['total_letters_coded']=pd.cut(data3['total_letters'],bins=10).cat.codes\n# data3['total_spaces_coded']=pd.cut(data3['total_spaces'],bins=10).cat.codes\n# data3['total_others_coded']=pd.cut(data3['total_others'],bins=10).cat.codes\n# data3=data3.reset_index(drop=True)\n#\n#\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n# data['total_digits_coded']=data3['total_digits_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\n# data2['total_digits_coded']=data3['total_digits_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n#\n# data['total_letters_coded']=data3['total_letters_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\n# data2['total_letters_coded']=data3['total_letters_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n#\n# data['total_spaces_coded']=data3['total_spaces_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\n# data2['total_spaces_coded']=data3['total_spaces_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n#\n# data['total_others_coded']=data3['total_others_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\n# data2['total_others_coded']=data3['total_others_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n#\n#\n# df3=pd.DataFrame()\n# df3=data2.reset_index(drop=True)\n#\n#\n# data['ratio_row_section']=data['row_noOfCharacters']/data['section_noOfCharacters']\n# df3['ratio_row_section']=df3['row_noOfCharacters']/df3['section_noOfCharacters']\n#\n#\n# data['amount_col_man']=0\n# for i in range(0,data.shape[0]):\n# s=data.at[i,'row_string']\n# if '$' in s:\n# data.at[i, 'amount_col_man'] = 1\n# s = s.replace(',', '')\n# s=s.replace('$',' ')\n# digits=re.findall(r\"\\s+\\d+\\.\\d+$|\\s+\\d+\\.\\d+\\s+\", s,flags=re.MULTILINE)\n# for j in digits:\n# if float(j)<=data.at[i,'check_checkAmount']:\n# data.at[i,'amount_col_man']=1\n# break\n#\n#\n# df3['amount_col_man']=0\n# for i in range(0,df3.shape[0]):\n# s=df3.at[i,'row_string']\n# if '$' in s:\n# df3.at[i, 'amount_col_man'] = 1\n# s = s.replace(',', '')\n# s = s.replace('$', ' ')\n# digits=re.findall(r\"\\s+\\d+\\.\\d+$|\\s+\\d+\\.\\d+\\s+\", s,flags=re.MULTILINE)\n# for j in digits:\n# if float(j)<=df3.at[i,'check_checkAmount']:\n# df3.at[i,'amount_col_man']=1\n# break\n#\n#\n# pattern=re.compile(\"Jan(uary)?|Feb(ruary)?|Mar(ch)?|Apr(il)?|May|Jun(e)?|Jul(y)?|Aug(ust)?|Sep(tember)?|Oct(ober)?|Nov(ember)?|Dec(ember)?\\s+\\d{1,2}[,/.]\\s+\\d{4}([0-3]?[0-9][.|/][0-1]?[0-9][.|/](([0-9]{4})|([0-9]{2})))|([0-1]?[0-9][.|/][0-3]?[0-9][.|/](([0-9]{4})|([0-9]{2})))\",re.IGNORECASE)\n#\n#\n# def dateFlag(x):\n# global pattern\n# if pattern.search(str(x)) is not None:\n# return 1\n# else:\n# return 0\n#\n#\n# data['date_flag'] = data['row_string'].apply(dateFlag)\n# df3['date_flag'] = df3['row_string'].apply(dateFlag)\n#\n# data['date_amt_combined']=0\n# df3['date_amt_combined']=0\n# data.loc[(data['date_flag']==1) & (data['amount_col_man']==1),'date_amt_combined']=1\n# data.loc[(data['date_flag']==0) & (data['amount_col_man']==1),'date_amt_combined']=1\n# data.loc[(data['date_flag']==1) & (data['amount_col_man']==0),'date_amt_combined']=0\n# 
data.loc[(data['date_flag']==0) & (data['amount_col_man']==0),'date_amt_combined']=0\n#\n# df3.loc[(df3['date_flag']==1) & (df3['amount_col_man']==1),'date_amt_combined']=1\n# df3.loc[(df3['date_flag']==0) & (df3['amount_col_man']==1),'date_amt_combined']=1\n# df3.loc[(df3['date_flag']==1) & (df3['amount_col_man']==0),'date_amt_combined']=0\n# df3.loc[(df3['date_flag']==0) & (df3['amount_col_man']==0),'date_amt_combined']=0\n#\n#\n#\n# def is_date(string):\n# try:\n# parse(string)\n# return 1\n# except ValueError:\n# return 0\n#\n# for i in range(0,df3.shape[0]):\n# s=df3.at[i,'row_string']\n# words=s.split()\n# for j in words:\n# try:\n# if is_date(j)==1:\n# df3.at[i, 'date_flag'] = 1\n# break\n# except OverflowError:\n# continue\n#\n# for i in range(0,data.shape[0]):\n# s=data.at[i,'row_string']\n# words=s.split()\n# for j in words:\n# try:\n# if is_date(j)==1:\n# data.at[i, 'date_flag'] = 1\n# break\n# except OverflowError:\n# continue\n#\n#\n#\n#\n# # X_train=data[['date_flag','amount_col_man','ratio_row_section','row_noOfCharacters','remittance_result','total_digits_coded','row_JosasisLRCoordinates_left','row_JosasisLRCoordinates_right','row_distanceFromLeft','row_distanceFromTop']]\n# # X_validation=df3[['date_flag','amount_col_man','ratio_row_section','row_noOfCharacters','remittance_result','total_digits_coded','row_JosasisLRCoordinates_left','row_JosasisLRCoordinates_right','row_distanceFromLeft','row_distanceFromTop']]\n# # Y_train = data['is_remittance_final'].reset_index(drop=True)\n# # Y_validation = df3['is_remittance_final'].reset_index(drop=True)\n#\n# X=data[['date_flag','amount_col_man','ratio_row_section','row_noOfCharacters','remittance_result','total_digits_coded','row_JosasisLRCoordinates_left','row_JosasisLRCoordinates_right','row_distanceFromLeft','row_distanceFromTop']]\n# Y= data['is_remittance_final'].reset_index(drop=True)\n# validation_size=0.3\n# X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size)\n#\n#\n# rfc = RandomForestClassifier(n_estimators=300)\n# rfc.fit(X_train, Y_train)\n# predictions = rfc.predict(X_validation)\n# print(accuracy_score(Y_validation, predictions))\n# print(confusion_matrix(Y_validation, predictions))\n# print(classification_report(Y_validation, predictions))\n#\n#\n#\n#\n# # df3['predictions']=predictions\n# # df3=df3[['ratio_row_section','total_digits_coded','total_others_coded','is_heading','is_total_final','check_checkAmount','check_checkNumber','page_pageNumber','row_rowNumber','row_string','amount_col_man','date_flag','remittance_result','is_remittance_final','predictions','ocr_filepath']]\n# # df3.to_csv(\"C:\\\\Users\\\\shubham.kamal\\\\Desktop\\\\LITM\\\\not_success_2.csv\")\n# # #\n# #\n# # ############################################################################\n#\n# data=pd.read_csv(r\"C:\\Users\\shubham.kamal\\Desktop\\LITM\\Not_Success_rows_ver_clean.csv\", sep=',',encoding='cp1256')\n# data2=pd.read_csv(r\"C:\\Users\\shubham.kamal\\Desktop\\LITM\\toKamal-1.3_success.csv\", sep=',',encoding='cp1256',low_memory=False)\n#\n# data = data[(data['page_type_final'] == 'remittance') | (data['page_type_final'] == 'others')]\n#\n# data3=data.append(data2,ignore_index=True)\n# data3['row_noOfCharacters']=pd.cut(data3['row_noOfCharacters'],bins=10).cat.codes\n# data3=data3.reset_index(drop=True)\n#\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n# 
data['row_noOfCharacters']=data3['row_noOfCharacters'].loc[:data.shape[0]-1].reset_index(drop=True)\n# data2['row_noOfCharacters']=data3['row_noOfCharacters'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n#\n#\n# for i in data['check_checkNumber'].unique():\n# for j in data[data['check_checkNumber']==i]['page_pageNumber'].unique():\n# temp=pd.DataFrame()\n# temp=data[(data['check_checkNumber']==i) & (data['page_pageNumber']==j)]\n# temp.reset_index(drop=True,inplace=True)\n# first_row=1\n# last_row=temp.at[temp.shape[0]-1,'row_rowNumber']\n# df=pd.DataFrame()\n# df=temp[temp['is_total_final']==1]\n# if df.empty:\n# total_row_number=last_row\n# else:\n# total_row_number=df.reset_index(drop=True).at[0,'row_rowNumber']\n# df2=pd.DataFrame()\n# df2=temp[temp['is_heading']==1]\n# if df2.empty:\n# heading_row_number=first_row\n# else:\n# heading_row_number=df2.reset_index(drop=True).at[0,'row_rowNumber']\n# data.loc[(data['check_checkNumber'] == i) & (data['page_pageNumber'] == j) & (data['row_rowNumber']<= heading_row_number),'remittance_result']=0\n# data.loc[(data['check_checkNumber'] == i) & (data['page_pageNumber'] == j) & ((data['row_rowNumber'] > heading_row_number) & (data['row_rowNumber'] < total_row_number)), 'remittance_result'] = 1\n# data.loc[(data['check_checkNumber'] == i) & (data['page_pageNumber'] == j) & (data['row_rowNumber'] >= total_row_number), 'remittance_result'] = 0\n#\n# for i in data2['check_checkNumber'].unique():\n# for j in data2[data2['check_checkNumber']==i]['page_pageNumber'].unique():\n# temp=pd.DataFrame()\n# temp=data2[(data2['check_checkNumber']==i) & (data2['page_pageNumber']==j)]\n# temp.reset_index(drop=True,inplace=True)\n# first_row=1\n# last_row=temp.at[temp.shape[0]-1,'row_rowNumber']\n# df=pd.DataFrame()\n# df=temp[temp['is_total_final']==1]\n# if df.empty:\n# total_row_number=last_row\n# else:\n# total_row_number=df.reset_index(drop=True).at[0,'row_rowNumber']\n# df2=pd.DataFrame()\n# df2=temp[temp['is_heading']==1]\n# if df2.empty:\n# heading_row_number=first_row\n# else:\n# heading_row_number=df2.reset_index(drop=True).at[0,'row_rowNumber']\n# data2.loc[(data2['check_checkNumber'] == i) & (data2['page_pageNumber'] == j) & (data2['row_rowNumber']<= heading_row_number),'remittance_result']=0\n# data2.loc[(data2['check_checkNumber'] == i) & (data2['page_pageNumber'] == j) & ((data2['row_rowNumber'] > heading_row_number) & (data2['row_rowNumber'] < total_row_number)), 'remittance_result'] = 1\n# data2.loc[(data2['check_checkNumber'] == i) & (data2['page_pageNumber'] == j) & (data2['row_rowNumber'] >= total_row_number), 'remittance_result'] = 0\n#\n#\n# for i in range(0,data3.shape[0]):\n# s=data3.at[i,'row_string']\n# digits = sum(c.isdigit() for c in s)\n# letters = sum(c.isalpha() for c in s)\n# spaces = sum(c.isspace() for c in s)\n# others = len(s) - digits - letters - spaces\n#\n# total_charac=digits+letters+spaces+others\n# data3.at[i,'total_digits'] = digits/total_charac*100\n# data3.at[i, 'total_letters'] = letters/total_charac*100\n# data3.at[i, 'total_spaces'] = spaces/total_charac*100\n# data3.at[i, 'total_others'] = others/total_charac*100\n#\n# data3['total_digits_coded']=pd.cut(data3['total_digits'],bins=10).cat.codes\n# data3['total_letters_coded']=pd.cut(data3['total_letters'],bins=10).cat.codes\n# data3['total_spaces_coded']=pd.cut(data3['total_spaces'],bins=10).cat.codes\n# 
data3['total_others_coded']=pd.cut(data3['total_others'],bins=10).cat.codes\n# data3=data3.reset_index(drop=True)\n#\n#\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n# data['total_digits_coded']=data3['total_digits_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\n# data2['total_digits_coded']=data3['total_digits_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n#\n# data['total_letters_coded']=data3['total_letters_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\n# data2['total_letters_coded']=data3['total_letters_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n#\n# data['total_spaces_coded']=data3['total_spaces_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\n# data2['total_spaces_coded']=data3['total_spaces_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n#\n# data['total_others_coded']=data3['total_others_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\n# data2['total_others_coded']=data3['total_others_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n#\n#\n# df3=pd.DataFrame()\n# df3=data2.reset_index(drop=True)\n#\n#\n# data['ratio_row_section']=data['row_noOfCharacters']/data['section_noOfCharacters']\n# df3['ratio_row_section']=df3['row_noOfCharacters']/df3['section_noOfCharacters']\n#\n#\n# data['amount_col_man']=0\n# for i in range(0,data.shape[0]):\n# s=data.at[i,'row_string']\n# if '$' in s:\n# data.at[i, 'amount_col_man'] = 1\n# s = s.replace(',', '')\n# s=s.replace('$',' ')\n# digits=re.findall(r\"\\s+\\d+\\.\\d+$|\\s+\\d+\\.\\d+\\s+\", s,flags=re.MULTILINE)\n# for j in digits:\n# if float(j)<=data.at[i,'check_checkAmount']:\n# data.at[i,'amount_col_man']=1\n# break\n#\n#\n# df3['amount_col_man']=0\n# for i in range(0,df3.shape[0]):\n# s=df3.at[i,'row_string']\n# if '$' in s:\n# df3.at[i, 'amount_col_man'] = 1\n# s = s.replace(',', '')\n# s = s.replace('$', ' ')\n# digits=re.findall(r\"\\s+\\d+\\.\\d+$|\\s+\\d+\\.\\d+\\s+\", s,flags=re.MULTILINE)\n# for j in digits:\n# if float(j)<=df3.at[i,'check_checkAmount']:\n# df3.at[i,'amount_col_man']=1\n# break\n#\n#\n# pattern=re.compile(\"Jan(uary)?|Feb(ruary)?|Mar(ch)?|Apr(il)?|May|Jun(e)?|Jul(y)?|Aug(ust)?|Sep(tember)?|Oct(ober)?|Nov(ember)?|Dec(ember)?\\s+\\d{1,2}[,/.]\\s+\\d{4}([0-3]?[0-9][.|/][0-1]?[0-9][.|/](([0-9]{4})|([0-9]{2})))|([0-1]?[0-9][.|/][0-3]?[0-9][.|/](([0-9]{4})|([0-9]{2})))\",re.IGNORECASE)\n#\n#\n# def dateFlag(x):\n# global pattern\n# if pattern.search(str(x)) is not None:\n# return 1\n# else:\n# return 0\n#\n#\n# data['date_flag'] = data['row_string'].apply(dateFlag)\n# df3['date_flag'] = df3['row_string'].apply(dateFlag)\n#\n# data['date_amt_combined']=0\n# df3['date_amt_combined']=0\n# data.loc[(data['date_flag']==1) & (data['amount_col_man']==1),'date_amt_combined']=1\n# data.loc[(data['date_flag']==0) & (data['amount_col_man']==1),'date_amt_combined']=1\n# data.loc[(data['date_flag']==1) & (data['amount_col_man']==0),'date_amt_combined']=0\n# data.loc[(data['date_flag']==0) & (data['amount_col_man']==0),'date_amt_combined']=0\n#\n# df3.loc[(df3['date_flag']==1) & (df3['amount_col_man']==1),'date_amt_combined']=1\n# df3.loc[(df3['date_flag']==0) & 
(df3['amount_col_man']==1),'date_amt_combined']=1\n# df3.loc[(df3['date_flag']==1) & (df3['amount_col_man']==0),'date_amt_combined']=0\n# df3.loc[(df3['date_flag']==0) & (df3['amount_col_man']==0),'date_amt_combined']=0\n#\n# def is_date(string):\n# try:\n# parse(string)\n# return 1\n# except ValueError:\n# return 0\n#\n# for i in range(0,df3.shape[0]):\n# s=df3.at[i,'row_string']\n# words=s.split()\n# for j in words:\n# try:\n# if is_date(j)==1:\n# df3.at[i, 'date_flag'] = 1\n# break\n# except OverflowError:\n# continue\n#\n# for i in range(0,data.shape[0]):\n# s=data.at[i,'row_string']\n# words=s.split()\n# for j in words:\n# try:\n# if is_date(j)==1:\n# data.at[i, 'date_flag'] = 1\n# break\n# except OverflowError:\n# continue\n#\n#\n# X_train=data[['date_flag','amount_col_man','ratio_row_section','row_noOfCharacters','remittance_result','total_digits_coded','row_JosasisLRCoordinates_left','row_JosasisLRCoordinates_right','row_distanceFromLeft','row_distanceFromTop']]\n# X_validation=df3[['date_flag','amount_col_man','ratio_row_section','row_noOfCharacters','remittance_result','total_digits_coded','row_JosasisLRCoordinates_left','row_JosasisLRCoordinates_right','row_distanceFromLeft','row_distanceFromTop']]\n# Y_train = data['is_remittance_final'].reset_index(drop=True)\n# Y_validation = df3['is_remittance_final'].reset_index(drop=True)\n# voca=['date_flag','amount_col_man','ratio_row_section','row_noOfCharacters','remittance_result','total_digits_coded','row_JosasisLRCoordinates_left','row_JosasisLRCoordinates_right','row_distanceFromLeft','row_rowNumber']\n# rfc = RandomForestClassifier(n_estimators=300)\n# rfc.fit(X_train, Y_train)\n# predictions = rfc.predict(X_validation)\n# print(accuracy_score(Y_validation, predictions))\n# print(confusion_matrix(Y_validation, predictions))\n# print(classification_report(Y_validation, predictions))\n#\n# importances = rfc.feature_importances_\n# print(importances)\n# std = np.std([tree.feature_importances_ for tree in rfc.estimators_],\n# axis=0)\n# indices = np.argsort(importances)[::-1]\n# print(\"Feature ranking:\")\n# #print(train_features.columns)\n#\n# for f in range(X_train.shape[1]):\n# print(\"%d. 
%s (%f)\" % (f + 1, voca[indices[f]], importances[indices[f]]))\n#\n# # # Plot the feature importances of the forest\n# # plt.figure()\n# # plt.title(\"Feature importances\")\n# # plt.bar(range(X_train.shape[1]), importances[indices],\n# # color=\"r\", yerr=std[indices], align=\"center\")\n# # plt.xticks(range(X_train.shape[1]), myList)\n# # plt.xlim([-1, X_train.shape[1]])\n# # plt.show()\n#\n#\n#\n# df3['predictions']=predictions\n# df3=df3[['ratio_row_section','total_digits_coded','total_others_coded','is_heading','is_total_final','check_checkAmount','check_checkNumber','page_pageNumber','row_rowNumber','row_string','amount_col_man','date_flag','remittance_result','is_remittance_final','predictions','ocr_filepath']]\n# df3.to_csv(\"C:\\\\Users\\\\shubham.kamal\\\\Desktop\\\\LITM\\\\success_1.csv\")\n#\n#\n# ##############################################################################################\n#\n# data=pd.read_csv(r\"C:\\Users\\shubham.kamal\\Desktop\\LITM\\Not_Success_rows_ver_clean.csv\", sep=',',encoding='cp1256')\n# data2=pd.read_csv(r\"C:\\Users\\shubham.kamal\\Desktop\\LITM\\toKamal-1.4_success.csv\", sep=',',encoding='cp1256',low_memory=False)\n#\n#\n# data=data[data['page_type_final']=='remittance']\n#\n# data3=data.append(data2,ignore_index=True)\n# data3['row_noOfCharacters']=pd.cut(data3['row_noOfCharacters'],bins=10).cat.codes\n# data3=data3.reset_index(drop=True)\n#\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n# data['row_noOfCharacters']=data3['row_noOfCharacters'].loc[:data.shape[0]-1].reset_index(drop=True)\n# data2['row_noOfCharacters']=data3['row_noOfCharacters'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n#\n#\n# for i in data['check_checkNumber'].unique():\n# for j in data[data['check_checkNumber']==i]['page_pageNumber'].unique():\n# temp=pd.DataFrame()\n# temp=data[(data['check_checkNumber']==i) & (data['page_pageNumber']==j)]\n# temp.reset_index(drop=True,inplace=True)\n# first_row=1\n# last_row=temp.at[temp.shape[0]-1,'row_rowNumber']\n# df=pd.DataFrame()\n# df=temp[temp['is_total_final']==1]\n# if df.empty:\n# total_row_number=last_row\n# else:\n# total_row_number=df.reset_index(drop=True).at[0,'row_rowNumber']\n# df2=pd.DataFrame()\n# df2=temp[temp['is_heading']==1]\n# if df2.empty:\n# heading_row_number=first_row\n# else:\n# heading_row_number=df2.reset_index(drop=True).at[0,'row_rowNumber']\n# data.loc[(data['check_checkNumber'] == i) & (data['page_pageNumber'] == j) & (data['row_rowNumber']<= heading_row_number),'remittance_result']=0\n# data.loc[(data['check_checkNumber'] == i) & (data['page_pageNumber'] == j) & ((data['row_rowNumber'] > heading_row_number) & (data['row_rowNumber'] < total_row_number)), 'remittance_result'] = 1\n# data.loc[(data['check_checkNumber'] == i) & (data['page_pageNumber'] == j) & (data['row_rowNumber'] >= total_row_number), 'remittance_result'] = 0\n#\n# for i in data2['check_checkNumber'].unique():\n# for j in data2[data2['check_checkNumber']==i]['page_pageNumber'].unique():\n# temp=pd.DataFrame()\n# temp=data2[(data2['check_checkNumber']==i) & (data2['page_pageNumber']==j)]\n# temp.reset_index(drop=True,inplace=True)\n# first_row=1\n# last_row=temp.at[temp.shape[0]-1,'row_rowNumber']\n# df=pd.DataFrame()\n# df=temp[temp['is_total_final']==1]\n# if df.empty:\n# total_row_number=last_row\n# else:\n# total_row_number=df.reset_index(drop=True).at[0,'row_rowNumber']\n# df2=pd.DataFrame()\n# 
df2=temp[temp['is_heading']==1]\n# if df2.empty:\n# heading_row_number=first_row\n# else:\n# heading_row_number=df2.reset_index(drop=True).at[0,'row_rowNumber']\n# data2.loc[(data2['check_checkNumber'] == i) & (data2['page_pageNumber'] == j) & (data2['row_rowNumber']<= heading_row_number),'remittance_result']=0\n# data2.loc[(data2['check_checkNumber'] == i) & (data2['page_pageNumber'] == j) & ((data2['row_rowNumber'] > heading_row_number) & (data2['row_rowNumber'] < total_row_number)), 'remittance_result'] = 1\n# data2.loc[(data2['check_checkNumber'] == i) & (data2['page_pageNumber'] == j) & (data2['row_rowNumber'] >= total_row_number), 'remittance_result'] = 0\n#\n#\n# for i in range(0,data3.shape[0]):\n# s=data3.at[i,'row_string']\n# digits = sum(c.isdigit() for c in s)\n# letters = sum(c.isalpha() for c in s)\n# spaces = sum(c.isspace() for c in s)\n# others = len(s) - digits - letters - spaces\n#\n# total_charac=digits+letters+spaces+others\n# data3.at[i,'total_digits'] = digits/total_charac*100\n# data3.at[i, 'total_letters'] = letters/total_charac*100\n# data3.at[i, 'total_spaces'] = spaces/total_charac*100\n# data3.at[i, 'total_others'] = others/total_charac*100\n#\n# data3['total_digits_coded']=pd.cut(data3['total_digits'],bins=10).cat.codes\n# data3['total_letters_coded']=pd.cut(data3['total_letters'],bins=10).cat.codes\n# data3['total_spaces_coded']=pd.cut(data3['total_spaces'],bins=10).cat.codes\n# data3['total_others_coded']=pd.cut(data3['total_others'],bins=10).cat.codes\n# data3=data3.reset_index(drop=True)\n#\n#\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n# data['total_digits_coded']=data3['total_digits_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\n# data2['total_digits_coded']=data3['total_digits_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n#\n# data['total_letters_coded']=data3['total_letters_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\n# data2['total_letters_coded']=data3['total_letters_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n#\n# data['total_spaces_coded']=data3['total_spaces_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\n# data2['total_spaces_coded']=data3['total_spaces_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n#\n# data['total_others_coded']=data3['total_others_coded'].loc[:data.shape[0]-1].reset_index(drop=True)\n# data2['total_others_coded']=data3['total_others_coded'].loc[data.shape[0]:data3.shape[0]-1].reset_index(drop=True)\n# data=data.reset_index(drop=True)\n# data2=data2.reset_index(drop=True)\n#\n#\n# df3=pd.DataFrame()\n# df3=data2.reset_index(drop=True)\n#\n#\n# data['ratio_row_section']=data['row_noOfCharacters']/data['section_noOfCharacters']\n# df3['ratio_row_section']=df3['row_noOfCharacters']/df3['section_noOfCharacters']\n#\n#\n# data['amount_col_man']=0\n# for i in range(0,data.shape[0]):\n# s=data.at[i,'row_string']\n# if '$' in s:\n# data.at[i, 'amount_col_man'] = 1\n# s = s.replace(',', '')\n# s=s.replace('$',' ')\n# digits=re.findall(r\"\\s+\\d+\\.\\d+$|\\s+\\d+\\.\\d+\\s+\", s,flags=re.MULTILINE)\n# for j in digits:\n# if float(j)<=data.at[i,'check_checkAmount']:\n# data.at[i,'amount_col_man']=1\n# break\n#\n#\n# df3['amount_col_man']=0\n# for i in range(0,df3.shape[0]):\n# 
s=df3.at[i,'row_string']\n# if '$' in s:\n# df3.at[i, 'amount_col_man'] = 1\n# s = s.replace(',', '')\n# s = s.replace('$', ' ')\n# digits=re.findall(r\"\\s+\\d+\\.\\d+$|\\s+\\d+\\.\\d+\\s+\", s,flags=re.MULTILINE)\n# for j in digits:\n# if float(j)<=df3.at[i,'check_checkAmount']:\n# df3.at[i,'amount_col_man']=1\n# break\n#\n#\n# pattern=re.compile(\"Jan(uary)?|Feb(ruary)?|Mar(ch)?|Apr(il)?|May|Jun(e)?|Jul(y)?|Aug(ust)?|Sep(tember)?|Oct(ober)?|Nov(ember)?|Dec(ember)?\\s+\\d{1,2}[,/.]\\s+\\d{4}([0-3]?[0-9][.|/][0-1]?[0-9][.|/](([0-9]{4})|([0-9]{2})))|([0-1]?[0-9][.|/][0-3]?[0-9][.|/](([0-9]{4})|([0-9]{2})))\",re.IGNORECASE)\n#\n#\n# def dateFlag(x):\n# global pattern\n# if pattern.search(str(x)) is not None:\n# return 1\n# else:\n# return 0\n#\n#\n# data['date_flag'] = data['row_string'].apply(dateFlag)\n# df3['date_flag'] = df3['row_string'].apply(dateFlag)\n#\n#\n#\n#\n# def is_date(string):\n# try:\n# parse(string)\n# return 1\n# except ValueError:\n# return 0\n#\n# for i in range(0,df3.shape[0]):\n# s=df3.at[i,'row_string']\n# words=s.split()\n# for j in words:\n# try:\n# if is_date(j)==1:\n# df3.at[i, 'date_flag'] = 1\n# break\n# except OverflowError:\n# continue\n#\n# for i in range(0,data.shape[0]):\n# s=data.at[i,'row_string']\n# words=s.split()\n# for j in words:\n# try:\n# if is_date(j)==1:\n# data.at[i, 'date_flag'] = 1\n# break\n# except OverflowError:\n# continue\n#\n#\n#\n# data['date_amt_combined']=0\n# df3['date_amt_combined']=0\n# data.loc[(data['date_flag']==1) & (data['amount_col_man']==1),'date_amt_combined']=1\n# data.loc[(data['date_flag']==0) & (data['amount_col_man']==1),'date_amt_combined']=1\n# data.loc[(data['date_flag']==1) & (data['amount_col_man']==0),'date_amt_combined']=0\n# data.loc[(data['date_flag']==0) & (data['amount_col_man']==0),'date_amt_combined']=0\n#\n# df3.loc[(df3['date_flag']==1) & (df3['amount_col_man']==1),'date_amt_combined']=1\n# df3.loc[(df3['date_flag']==0) & (df3['amount_col_man']==1),'date_amt_combined']=1\n# df3.loc[(df3['date_flag']==1) & (df3['amount_col_man']==0),'date_amt_combined']=0\n# df3.loc[(df3['date_flag']==0) & (df3['amount_col_man']==0),'date_amt_combined']=0\n#\n# X_train=data[['date_flag','amount_col_man','ratio_row_section','row_noOfCharacters','remittance_result','total_digits_coded','total_others_coded','row_JosasisLRCoordinates_left','row_JosasisLRCoordinates_right','row_distanceFromLeft','row_distanceFromTop']]\n# X_validation=df3[['date_flag','amount_col_man','ratio_row_section','row_noOfCharacters','remittance_result','total_digits_coded','total_others_coded','row_JosasisLRCoordinates_left','row_JosasisLRCoordinates_right','row_distanceFromLeft','row_distanceFromTop']]\n# Y_train = data['is_remittance_final'].reset_index(drop=True)\n# Y_validation = df3['is_remittance_final'].reset_index(drop=True)\n#\n# rfc = RandomForestClassifier(n_estimators=300)\n# rfc.fit(X_train, Y_train)\n# predictions = rfc.predict(X_validation)\n# print(accuracy_score(Y_validation, predictions))\n# print(confusion_matrix(Y_validation, predictions))\n# print(classification_report(Y_validation, predictions))\n#\n#\n# df3['predictions']=predictions\n# df3=df3[['ratio_row_section','total_digits_coded','total_others_coded','is_heading','is_total_final','check_checkAmount','check_checkNumber','page_pageNumber','row_rowNumber','row_string','amount_col_man','date_flag','remittance_result','is_remittance_final','predictions','ocr_filepath']]\n# df3.to_csv(\"C:\\\\Users\\\\shubham.kamal\\\\Desktop\\\\LITM\\\\success_2.csv\")\n#\n#\n# 
######################################################################\n#\n# #\n# data=pd.read_csv(r\"C:\\Users\\shubham.kamal\\Desktop\\LITM\\Not_Success_rows_ver_clean.csv\", sep=',',encoding='cp1256')\n# # data2=pd.read_csv(r\"C:\\Users\\shubham.kamal\\Desktop\\LITM\\Status.csv\", sep=',',encoding='cp1256',low_memory=False)\n# # print(data.shape[0])\n# # print(data2.shape[0])\n# # for i in data['check_checkNumber'].unique():\n# # data.loc[data['check_checkNumber']==i,'indexing_status'] = data2[data2['Check Number']==i]['indexing_status'].values\n# # print(data['indexing_status'].value_counts())\n# #\n# temp=pd.DataFrame()\n# temp=data.groupby(['check_checkNumber','page_pageNumber']).size().reset_index().rename(columns={0:'count'})\n# print(temp.head(4))\n# print(temp.shape[0])" } ]
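The commented-out pipeline above trains a `RandomForestClassifier` on hand-built row features and then ranks feature importances. As a hedged, self-contained sketch of that ranking step (the synthetic data and the three-name feature subset are stand-ins for the real remittance columns, not the repo's actual inputs):

```python
# Minimal sketch of the feature-importance ranking performed above,
# with synthetic data standing in for the remittance features.
import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(0)
feature_names = ['date_flag', 'amount_col_man', 'ratio_row_section']  # illustrative subset
X = rng.random((500, len(feature_names)))
y = (X[:, 0] > 0.5).astype(int)  # toy target, stands in for is_remittance_final

rfc = RandomForestClassifier(n_estimators=300, random_state=0)
rfc.fit(X, y)

importances = rfc.feature_importances_
indices = np.argsort(importances)[::-1]  # rank features, most important first
for rank, idx in enumerate(indices, start=1):
    print(f"{rank}. {feature_names[idx]} ({importances[idx]:.6f})")
```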
1
n-miyamoto/multvariable_lstm
https://github.com/n-miyamoto/multvariable_lstm
7811a4f223c03e8a5262ef13d2718f6fb2ff9d0b
c83b30dafbacab37baf6c8176fcbd29904966da6
ab0e89147d9c7b8aabbbeea79834de047d35e520
refs/heads/master
2020-03-07T14:47:50.747291
2018-04-01T02:04:51
2018-04-01T02:04:51
127,535,874
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5417680740356445, "alphanum_fraction": 0.5600162148475647, "avg_line_length": 25.79347801208496, "blob_id": "c499167bb73b58033eb910c8af63e2a2fbb5305f", "content_id": "38a6122b03103bee1261d57df18efb740c1e2f67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2466, "license_type": "no_license", "max_line_length": 130, "num_lines": 92, "path": "/train.py", "repo_name": "n-miyamoto/multvariable_lstm", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom make_data import *\nfrom stacked_lstm import *\nimport numpy as np\nfrom chainer import optimizers, cuda\nimport time\nimport sys\nimport _pickle as cPickle\n\nIN_UNITS = 5\nHIDDEN_UNITS_L1 = 80\nHIDDEN_UNITS_L2 = 80\nOUT_UNITS = 5\nTRAINING_EPOCHS = 4000\nDISPLAY_EPOCH = 10\nMINI_BATCH_SIZE = 100\nLENGTH_OF_SEQUENCE = 100\nSTEPS_PER_CYCLE = 50\nNUMBER_OF_CYCLES = 100\n\nxp = cuda.cupy\n\ndef compute_loss(model, sequences):\n loss = 0\n num, rows, cols = sequences.shape\n length_of_sequence = cols\n for i in range(cols - 1):\n x = chainer.Variable(\n xp.asarray(\n [[sequences[k, j, i + 0] for k in range(num)] for j in range(rows)], \n dtype=np.float32\n )\n )\n t = chainer.Variable(\n xp.asarray(\n [[sequences[k, j, i + 1] for k in range(num)] for j in range(rows)], \n dtype=np.float32\n )\n )\n loss += model(x, t)\n return loss \n\n\nif __name__ == \"__main__\":\n\n # make training data\n data_maker = DataMaker(steps_per_cycle=STEPS_PER_CYCLE, number_of_cycles=NUMBER_OF_CYCLES)\n train_data = data_maker.make()\n\n # setup model\n model = LSTM(IN_UNITS, HIDDEN_UNITS_L1, HIDDEN_UNITS_L2 ,OUT_UNITS)\n for param in model.params():\n data = param.data\n data[:] = np.random.uniform(-0.1, 0.1, data.shape)\n\n model.to_gpu(0)\n\n # setup optimizer\n optimizer = optimizers.Adam()\n optimizer.setup(model)\n\n start = time.time()\n cur_start = start\n for epoch in range(TRAINING_EPOCHS):\n sequences = data_maker.make_mini_batch(train_data, mini_batch_size=MINI_BATCH_SIZE, length_of_sequence=LENGTH_OF_SEQUENCE)\n model.reset_state()\n model.zerograds()\n loss = compute_loss(model, sequences)\n loss.backward()\n optimizer.update()\n\n if epoch != 0 and epoch % DISPLAY_EPOCH == 0:\n cur_end = time.time()\n # display loss\n print(\n \"[{j}]training loss:\\t{i}\\t{k}[sec/epoch]\".format(\n j=epoch, \n i=loss.data/(sequences.shape[1] - 1), \n k=(cur_end - cur_start)/DISPLAY_EPOCH\n )\n )\n cur_start = time.time() \n sys.stdout.flush()\n\n end = time.time()\n\n # save model\n cPickle.dump(model, open(\"./model.pkl\", \"wb\"))\n\n print(\"{}[sec]\".format(end - start))\n\n" }, { "alpha_fraction": 0.6029556393623352, "alphanum_fraction": 0.6177340149879456, "avg_line_length": 29.727272033691406, "blob_id": "0b9d52a429cce696b0c9dad8e58a7d27be4f5aae", "content_id": "9f598d122a25ff00be05ab9581a5f389408a7922", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1015, "license_type": "no_license", "max_line_length": 97, "num_lines": 33, "path": "/make_data.py", "repo_name": "n-miyamoto/multvariable_lstm", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n \nimport numpy as np\nimport math\nimport random\nimport pandas as pd \n\nROW_SIZE = 5\nTRAINING_START = 0\nTRAINING_SIGNAL_LENGTH = 100000\n\nrandom.seed(0)\n \n\nclass DataMaker(object):\n \n def __init__(self, steps_per_cycle, number_of_cycles):\n #self.steps_per_cycle = steps_per_cycle\n #self.number_of_cycles = 
number_of_cycles\n self.df = pd.read_csv(\"5sins.csv\",encoding=\"shift_jis\") \n \n def make(self):\n return self.df \n\n def make_mini_batch(self, data, mini_batch_size, length_of_sequence): \n sequences = np.ndarray((ROW_SIZE, mini_batch_size, length_of_sequence), dtype=np.float32)\n for j in range(ROW_SIZE):\n data = self.df.ix[TRAINING_START:TRAINING_SIGNAL_LENGTH,j+1]\n for i in range(mini_batch_size):\n index = random.randint(0, len(data) - length_of_sequence)\n sequences[j][i] = data[index:index+length_of_sequence]\n return sequences\n\n" }, { "alpha_fraction": 0.6348837018013, "alphanum_fraction": 0.6483721137046814, "avg_line_length": 29.700000762939453, "blob_id": "e3b77572eca443a7aa1a28de99f24eb54a9251c3", "content_id": "5e3ad40a8191e27910678ed66df758dcad070396", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2150, "license_type": "no_license", "max_line_length": 120, "num_lines": 70, "path": "/predict.py", "repo_name": "n-miyamoto/multvariable_lstm", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n \n \nimport _pickle as cPickle\nimport numpy as np\nfrom chainer import optimizers, cuda\nimport chainer\nfrom make_data import *\n\n\nMODEL_PATH = \"./model.pkl\"\nPREDICTION_LENGTH = 75\nPREDICTION_PATH = \"./prediction.txt\"\nINITIAL_PATH = \"./initial.txt\"\nMINI_BATCH_SIZE = 100\nLENGTH_OF_SEQUENCE = 100\nSTEPS_PER_CYCLE = 50\nNUMBER_OF_CYCLES = 100\nxp = cuda.cupy\n\n\ndef predict_sequence(model, input_seq, output_seq, dummy):\n sequences_col = len(input_seq)\n model.reset_state()\n for i in range(sequences_col):\n x = chainer.Variable(xp.asarray(input_seq[i:i+1], dtype=np.float32)[:, np.newaxis])\n future = model(x, dummy)\n cpu_future = chainer.cuda.to_cpu(future.data)\n return cpu_future\n\n\ndef predict(seq, model, pre_length, initial_path, prediction_path):\n # initial sequence \n input_seq = np.array(seq[:seq.shape[0]//4])\n\n output_seq = np.empty(0)\n \n # append an initial value\n output_seq = np.append(output_seq, input_seq[-1])\n\n model.train = False\n dummy = chainer.Variable(xp.asarray([0], dtype=np.float32)[:, np.newaxis])\n\n for i in range(pre_length):\n future = predict_sequence(model, input_seq, output_seq, dummy)\n input_seq = np.delete(input_seq, 0)\n input_seq = np.append(input_seq, future)\n output_seq = np.append(output_seq, future)\n\n with open(prediction_path, \"w\") as f:\n for (i, v) in enumerate(output_seq.tolist(), start=input_seq.shape[0]):\n f.write(\"{i} {v}\\n\".format(i=i-1, v=v))\n\n with open(initial_path, \"w\") as f:\n for (i, v) in enumerate(seq.tolist()):\n f.write(\"{i} {v}\\n\".format(i=i, v=v))\n\n\nif __name__ == \"__main__\":\n # load model\n model = cPickle.load(open(MODEL_PATH, 'rb'))\n\n # make data\n data_maker = DataMaker(steps_per_cycle=STEPS_PER_CYCLE, number_of_cycles=NUMBER_OF_CYCLES)\n data = data_maker.make()\n sequences = data_maker.make_mini_batch(data, mini_batch_size=MINI_BATCH_SIZE, length_of_sequence=LENGTH_OF_SEQUENCE)\n\n sample_index = 45\n predict(sequences[sample_index], model, PREDICTION_LENGTH, INITIAL_PATH, PREDICTION_PATH)\n\n" } ]
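The `make_mini_batch` method above samples random fixed-length windows from each variable's series to build a `(rows, batch, sequence)` tensor. A minimal sketch of the same windowing idea, assuming a plain `(rows, timesteps)` numpy array in place of the repo's CSV-backed DataFrame:

```python
# Sketch of the mini-batch windowing used in make_data.py, on a plain array.
import random
import numpy as np

def make_mini_batch(series, mini_batch_size, length_of_sequence):
    # series shape: (rows, timesteps); one row per variable
    rows, timesteps = series.shape
    batch = np.empty((rows, mini_batch_size, length_of_sequence), dtype=np.float32)
    for j in range(rows):
        for i in range(mini_batch_size):
            start = random.randint(0, timesteps - length_of_sequence)
            batch[j, i] = series[j, start:start + length_of_sequence]
    return batch

series = np.tile(np.sin(np.linspace(0, 40 * np.pi, 5000)), (5, 1))  # 5 toy sine rows
print(make_mini_batch(series, mini_batch_size=100, length_of_sequence=100).shape)  # (5, 100, 100)
```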
3
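And the training loop's `compute_loss` pairs the value at step `i` with the value at step `i + 1` as the target, summing over the sequence. A framework-free sketch of that next-step loss, with a trivial callable standing in for the stacked LSTM:

```python
# Next-step prediction loss, as in compute_loss above, without chainer.
import numpy as np

def compute_sequence_loss(model, batch):
    # batch shape: (variables, batch_size, timesteps), as produced by make_mini_batch
    timesteps = batch.shape[2]
    loss = 0.0
    for i in range(timesteps - 1):
        x = batch[:, :, i]       # input at step i
        t = batch[:, :, i + 1]   # target is the value one step ahead
        loss += float(np.mean((model(x) - t) ** 2))
    return loss

identity = lambda x: x  # trivial stand-in "model"
batch = np.random.rand(5, 8, 20).astype(np.float32)
print(compute_sequence_loss(identity, batch))
```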
Lethamyre/PracticeCource
https://github.com/Lethamyre/PracticeCource
c0f46b3b3bfa35624ca8cbbbb1d3ceca5cc14fe2
ba337ec6d36e21344e93500e80a5227d8d627958
78e806c42683fea4d72bf1526654121d630b4070
refs/heads/master
2022-12-07T04:36:45.251857
2020-09-02T07:50:51
2020-09-02T07:50:51
292,215,768
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5139331221580505, "alphanum_fraction": 0.5398089289665222, "avg_line_length": 30.362499237060547, "blob_id": "747596aea4ed37a30ac1a95af75f5c13fc3b9696", "content_id": "dd7b041f84df595da66569a692eda7d2142fc2c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2842, "license_type": "no_license", "max_line_length": 117, "num_lines": 80, "path": "/Course practice 2.py", "repo_name": "Lethamyre/PracticeCource", "src_encoding": "UTF-8", "text": "# my_list = [12, None, 53, 'Well i donno what to do', True, 2131.12 ]\n#\n# for x in my_list:\n# print(type(x))\n\n# elem_count = int(input('Введите количество элементов в списке'))\n# my_list = []\n# x = 0\n# elem = 0\n# while x < elem_count:\n# my_list.append(int(input('Какое число добавить в список?')))\n# x = x + 1\n#\n# for el in range(int(len(my_list))):\n# my_list[elem], my_list[elem + 1] = my_list[elem + 1], my_list[elem]\n# el += 2\n# print(my_list)\n\n# my_list = ['Winter', 'Spring', 'Summer', 'Autumn']\n# my_dict = {1 : 'Winter', 2 : 'Spring', 3 : 'Summer', 4 : 'Autumn'}\n# month = int(input(\"Введите месяц по номеру \"))\n# if month ==1 or month == 12 or month == 2:\n# print(my_dict.get(1))\n# # print(my_list[0])\n# elif month == 3 or month == 4 or month ==5:\n# print(my_dict.get(2))\n# # print(my_list[1])\n# elif month == 6 or month == 7 or month == 8:\n# print(my_dict.get(3))\n# # print(my_list[2])\n#\n# elif month == 9 or month == 10 or month == 11:\n# print(my_dict.get(4))\n# # print(my_list[3])\n# else:\n# print(\"Такого месяца не существует\")\n\n# my_list = input(\"Ваше предложение\")\n# my_word = []\n# num = 1\n# for el in range(my_list.count(' ') + 1):\n# my_word = my_list.split()\n# if len(str(my_word)) <= 10:\n# print(f\" {num} {my_word [el]}\")\n# num += 1\n# else:\n# print(f\" {num} {my_word [el] [0:10]}\")\n# num += 1\n\n\n# my_list = [7, 5, 3, 3, 2]\n# print(f\"Рейтинг - {my_list}\")\n# digit = int(input(\"Введите число\"))\n# for el in range(len(my_list)):\n# if my_list[el] == digit:\n# my_list.insert(el + 1, digit)\n# break\n# elif my_list[0] < digit:\n# my_list.insert(0, digit)\n# elif my_list[-1] > digit:\n# my_list.append(digit)\n# elif my_list[el] > digit and my_list[el + 1] < digit:\n# my_list.insert(el + 1, digit)\n# print(f\"Обновленный рейтинг - {my_list}\")\n\n\n# goods = int(input(\"Какое количество товара хотите внести в базу? \"))\n# n = 1\n# my_dict = []\n# my_list = []\n# my_analys = {}\n# while n <= goods:\n# my_dict = dict({'Название': input(\"Введите название \"), 'Цена': input(\"Введите цену \"),\n# 'Количество': input('Введите количество '), 'Ед': input(\"Введите единицу измерения \")})\n# my_list.append((n, my_dict))\n# n += 1\n# my_analys = dict(\n# {'Название': my_dict.get('Название'), 'Цена': my_dict.get('Цена'), 'Количество': my_dict.get('Количество'),\n# 'Ед': my_dict.get('Ед')})\n# print(my_analys)\n\n\n\n" } ]
1
Pratham-Pandey/Snake_Game
https://github.com/Pratham-Pandey/Snake_Game
d76de63e990e0cc30533fadf442ec3207bb4dc29
0b82b62145b3f848a19bba0ad08a2c45bb32143a
a14441facaf11fda1de1ddd25f4ff637054d8f76
refs/heads/master
2023-08-02T14:14:32.744416
2021-10-05T13:14:25
2021-10-05T13:14:25
413,531,323
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.47895917296409607, "alphanum_fraction": 0.5189771056175232, "avg_line_length": 30.04178237915039, "blob_id": "8ce8959f3b9a9deee641d045dce5d6aadacda274", "content_id": "ee2041efe755d5db35730832efd56d10cf0f9a02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11145, "license_type": "no_license", "max_line_length": 217, "num_lines": 359, "path": "/main.py", "repo_name": "Pratham-Pandey/Snake_Game", "src_encoding": "UTF-8", "text": "import pygame\nimport random\n\n#Initializing Pygame and Music\npygame.init()\npygame.mixer.init()\n\nclass snake:\n def __init__(self):\n # Movement Related Variables\n self.diff_x=0\n self.diff_y=0\n\n #Snake position and size\n self.obj_x=200\n self.obj_y=200\n self.obj_size=20\n\n def dis(self, snake_size, snake_list):\n for xy in snake_list:\n pygame.draw.rect(win, green, [xy[0], xy[1], self.obj_size, self.obj_size])\nclass apple:\n def __init__(self):\n self.apple_x=100\n self.apple_y=100\n self.size=20\n\n def dis(self):\n pygame.draw.rect( win,red, [self.apple_x, self.apple_y, self.size, self.size])\n\nclass boundary:\n def __init__(self):\n self.width=20\n\n def bound_calc(self,var):\n n = var\n bound_final = 0\n c = 0\n\n while n > 0:\n bound_mid = n % 10\n if c > 0:\n bound_final = bound_final * 10 + bound_mid\n n = n // 10\n c = c + 1\n\n bound_mid = 0\n bound_super_final = 0\n\n while bound_final > 0:\n bound_mid = bound_final % 10\n bound_super_final = bound_super_final * 10 + bound_mid\n bound_final = bound_final // 10\n\n return var//bound_super_final\n\n def disp(self):\n pygame.draw.rect(win, black, [0, 0, self.bound_calc(x), y])\n pygame.draw.rect(win, black, [0, 0, x, self.bound_calc(y)])\n pygame.draw.rect(win, black, [0, y-self.bound_calc(y), x, self.bound_calc(y)])\n pygame.draw.rect(win, black, [x-(self.bound_calc((x))*30), 0, self.bound_calc(x)*30, y])\n\nclass button:\n def __init__(self, x, y, color_1, color_2, button_width):\n self.x=x\n self.y=y\n\n self.width=button_width\n self.height=70\n\n self.diff=10\n\n self.color_1=color_1\n self.color_2=color_2\n\n self.mouse_click = pygame.mouse.get_pressed()\n self.mou_pos= pygame.mouse.get_pos()\n\n\n def disp(self, operation):\n global game_over\n global menu_on\n global credits\n\n #Loading Sound Effects\n hover_sound = pygame.mixer.Sound('hover.wav')\n click_sound = pygame.mixer.Sound('click_sound.wav')\n\n pygame.draw.rect(win, black, [self.x, self.y, self.width, self.height])\n\n box=pygame.draw.rect(win, self.color_1, [self.x + self.diff, self.y + self.diff, self.width - self.diff*2, self.height - self.diff*2])\n if self.mou_pos[0] > self.x + self.diff and self.mou_pos[0] < self.x + self.width - self.diff and self.mou_pos[1] > self.y + self.diff and self.mou_pos[1] < self.y + b1.height - self.diff:\n box = pygame.draw.rect(win, self.color_2, [self.x + self.diff, self.y + self.diff, self.width - self.diff * 2, self.height - self.diff * 2])\n hover_sound.play(0)\n\n if self.mouse_click[0] == True:\n click_sound.play(0)\n if operation == 1:\n game_over = False\n\n\n elif operation==2:\n credits = True\n menu_on = False\n\n elif operation == 3:\n pygame.quit()\n quit()\n\n elif operation == 4:\n credits = False\n menu_on = True\n\nclass text:\n def disp(self, message, size, color, pos_x, pos_y):\n font = pygame.font.SysFont(\"segoe print\", size, True)\n display_text = font.render(message, True, color)\n win.blit(display_text,[pos_x,pos_y])\n\nquit_game = False\ngame_over = True\nmenu_on = True\nretry = 
False\npause = False\ncredits = False\n\n# Colors\nblack = (0, 0, 0)\nred = (255, 0, 0)\nwhite = (255, 255, 255)\ngreen = (0, 175, 0)\nlight_green = (0, 255, 0)\nmidnight_blue = (0,255,255)\norange = (255,140,0)\ngold = (255, 215, 0)\nmaroon = (128, 0, 0)\nyellowgreen = (154, 205, 50)\nseablue = (0, 105, 148)\nmagenta = (255, 0, 255)\n\nwin=pygame.display.set_mode()\nx,y=win.get_size()\n\nsnake_list=[]\nsnake_size=5\n\nprint(\"X=\",x,\"Y=\",y)\ntest3=102\n\npygame.display.set_caption(\"Snake\")\n\nclock = pygame.time.Clock()\n\npygame.mixer.music.load('music1.mp3')\npygame.mixer.music.play(-1)\n\nwhile quit_game == False:\n obj_1 = snake()\n apple_obj = apple()\n draw_bound = boundary()\n\n score = 0\n\n snake_list = []\n\n button_width = 200\n button_x = x/2-(button_width/2)\n\n b1 = button(button_x, y/2, orange,midnight_blue, button_width)\n text_1 = text()\n\n b2 = button(button_x, y/2+100, orange, midnight_blue, button_width)\n text_2 = text()\n\n b3 = button(button_x, y/2+200, orange, midnight_blue, button_width)\n text_3 = text()\n\n text_retry_screen = text()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit_game=True\n\n if menu_on == True:\n win.fill(maroon)\n b1.disp(1)\n text_1.disp(\"Play\", 40, red, button_x+55, y/2 )\n b2.disp(2)\n text_2.disp(\"Credits\", 40, red, button_x+30, y/2+102)\n b3.disp(3)\n text_3.disp(\"Quit\", 40, red, button_x+55, y/2+202)\n\n print(game_over)\n\n elif credits == True:\n win.fill(maroon)\n b_bttn_text=text()\n b_bttn_text.disp(\"Instructions: \", 50, yellowgreen, x / 3+100, 100)\n b_bttn_text.disp(\"Press 'Esc' To pause and 'p' to resume \", 50, midnight_blue, x / 4, 150)\n\n b_bttn_text.disp(\"Created By\", 50, yellowgreen, x/3+100 , 300)\n b_bttn_text.disp(\"Pratham Pandey\", 50, midnight_blue, x / 3+20, 350)\n b_bttn_text.disp(\"Music\", 50, yellowgreen, x / 3+100, 500)\n b_bttn_text.disp(\"LINK\", 50, midnight_blue, x / 3, 550)\n\n\n b_bttn=button(100, y-100, orange, midnight_blue, 200 )\n b_bttn.disp(4)\n b_bttn_text.disp(\"Back\", 35, maroon, 150, y-100)\n\n elif retry == True:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n game_over=False\n menu_on=False\n retry=False\n elif event.key == pygame.K_BACKSPACE:\n menu_on=True\n game_over=True\n retry=False\n\n win.fill(maroon)\n text_retry_screen.disp(\"Press Enter To Retry and Backspace To Go To Menu\", 45, yellowgreen, x/11, y / 2)\n\n pygame.display.update()\n clock.tick(15)\n\n restrict_movment = [1, 1, 1, 1]\n\n while game_over == False:\n current_score_x = 1200\n current_score_y = 200\n\n high_score_x = 1160\n high_score_y = 500\n current_score_radius = 30\n score=str(score)\n current_score_text = text()\n high_score_text = text()\n\n current_score_text1 = text()\n high_score_text1 = text()\n\n win.fill(gold)\n apple_obj.dis()\n obj_1.dis(snake_size, snake_list)\n draw_bound.disp()\n\n # File Handling\n f = open(\"h_score.txt\", \"r\")\n h_score = f.read()\n\n #Loading Sound Effects\n eat_sound = pygame.mixer.Sound('sound1.mp3')\n hi_score_sound = pygame.mixer.Sound(\"sound2.mp3\")\n game_over_sound = pygame.mixer.Sound('game_over.wav')\n\n pygame.draw.rect(win, midnight_blue, pygame.Rect(high_score_x, current_score_y, 100, 50), 0, 5)\n current_score_text.disp(score, 45, magenta, current_score_x - 30, current_score_y -20)\n current_score_text1.disp(\"Score\", 45, orange, current_score_x - 50, current_score_y - 70)\n\n\n pygame.draw.rect(win, midnight_blue, pygame.Rect(high_score_x, high_score_y, 115, 50), 0, 5)\n 
high_score_text.disp(h_score, 45, magenta, high_score_x+ 7, high_score_y-15)\n high_score_text1.disp(\"High-Score\", 45, orange, high_score_x - 50, high_score_y -75)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n quit_game=True\n pygame.quit()\n quit()\n\n if event.type == pygame.KEYDOWN:\n if restrict_movment[0] == 1:\n if event.key == pygame.K_LEFT:\n obj_1.diff_x = -10\n obj_1.diff_y = 0\n restrict_movment = [1,1,1,0]\n\n if restrict_movment[3] == 1:\n if event.key == pygame.K_RIGHT:\n obj_1.diff_x = 10\n obj_1.diff_y = 0\n restrict_movment = [0,1,1,1]\n\n if restrict_movment[2] == 1:\n if event.key == pygame.K_DOWN:\n obj_1.diff_y = 10\n obj_1.diff_x = 0\n restrict_movment = [1,0,1,1]\n if restrict_movment[1] == 1:\n if event.key == pygame.K_UP:\n obj_1.diff_y = -10\n obj_1.diff_x = 0\n restrict_movment = [1,1,0,1]\n\n if event.key == pygame.K_ESCAPE:\n temp_diff_x = obj_1.diff_x\n temp_diff_y = obj_1.diff_y\n pause = True\n\n while pause == True:\n\n obj_1.diff_x = 0\n obj_1.diff_y = 0\n\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_p:\n pause=False\n obj_1.diff_x = temp_diff_x\n obj_1.diff_y = temp_diff_y\n\n if obj_1.obj_x == apple_obj.apple_x and obj_1.obj_y == apple_obj.apple_y:\n apple_obj.apple_x = int(round(random.randrange(draw_bound.bound_calc(x), x-(draw_bound.bound_calc(x) * 30) - apple_obj.size) / 10.0) * 10.0)\n apple_obj.apple_y = int(round(random.randrange(draw_bound.bound_calc(y), y-draw_bound.bound_calc(y) - apple_obj.size) / 10.0) * 10.0)\n snake_size += 1\n score = int(score)+1\n eat_sound.play(0)\n\n if score>int(h_score):\n h_score = score\n hi_score_sound.play(0)\n\n obj_1.obj_x += obj_1.diff_x\n obj_1.obj_y += obj_1.diff_y\n\n if obj_1.obj_x+obj_1.obj_size > x-(draw_bound.bound_calc((x))*30) or obj_1.obj_x < draw_bound.bound_calc(x) or obj_1.obj_y+obj_1.obj_size > y-draw_bound.bound_calc(y) or obj_1.obj_y < draw_bound.bound_calc(y):\n retry = True\n game_over = True\n menu_on = False\n game_over_sound.play(0)\n\n snake_head = []\n snake_head.append(obj_1.obj_x)\n snake_head.append(obj_1.obj_y)\n snake_list.append(snake_head)\n\n if len(snake_list) > snake_size:\n del snake_list[0]\n\n if snake_size >= 6:\n if snake_head in snake_list[:len(snake_list)-2]:\n snake_size = 5\n game_over = True\n menu_on = False\n retry = True\n\n f = open(\"h_score.txt\", \"w\")\n f.write(str(h_score))\n print(\"H_SCORE: \", h_score)\n f.close()\n\n pygame.display.update()\n clock.tick(15)\n\npygame.quit()\nquit()\n\n" } ]
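The apple placement above keeps random coordinates aligned to the game's 10-pixel movement grid by rounding `randrange` output. A small sketch of that snapping idea, with illustrative playfield bounds (not the game's computed ones); `randrange`'s step argument achieves the same alignment in one call:

```python
# Grid-snapping a random position, as done for the apple above.
import random

CELL = 10              # the game's grid step
left, right = 20, 600  # illustrative playfield bounds

snapped = int(round(random.randrange(left, right) / CELL) * CELL)  # round to nearest cell
direct = random.randrange(left, right, CELL)                       # step argument, same alignment
print(snapped % CELL == 0, direct % CELL == 0)  # True True
```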
1
mandrewd/pre-screen
https://github.com/mandrewd/pre-screen
dedf86837009775f960c981a121fdb271b75d211
cd8ae815254152d765566fbac71f07d6c7123641
d3a3805b4622ed886618fa2fc5387e04e2dc0e7d
refs/heads/master
2020-07-11T15:23:40.303559
2019-08-28T06:35:26
2019-08-28T06:35:26
204,582,765
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6041009426116943, "alphanum_fraction": 0.6167192459106445, "avg_line_length": 26.565217971801758, "blob_id": "8c45d0040296452f27ad0996832d8aeb7befa05c", "content_id": "475c4f63a140d31fc4ac9295ec339e4734b23992", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 634, "license_type": "no_license", "max_line_length": 86, "num_lines": 23, "path": "/birthdayEmailer.py", "repo_name": "mandrewd/pre-screen", "src_encoding": "UTF-8", "text": "import redis\n\n\n# placeholder for the externally defined function\ndef sendBirthdayEmail():\n pass\n\n\ndef birthdayEmailer():\n # the redis config values should be obtained from config, not passed in\n r = redis.Redis(host='localhost', port=6379, db=0)\n today = datetime.date.today()\n for ut in r.hscan_iter('users'):\n keys = ut[::2]\n vals = ut[1::2]\n user = dict(zip(keys, vals))\n d = datetime.datetime.strptime(user['birthday'], \"%Y-%m-%dT%H:%M:%S%z\").date()\n if d.month == today.month and d.day == today.month:\n sendBirthdayEmail()\n\n\nif __name__ == main:\n birthdayEmailer()\n" }, { "alpha_fraction": 0.7147058844566345, "alphanum_fraction": 0.7220588326454163, "avg_line_length": 47.64285659790039, "blob_id": "09c9db0ae296d958866e40ec457f19c98ea8b2a9", "content_id": "52e2edfd41f9a2e94d7e45ab4346837f47aca959", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 680, "license_type": "no_license", "max_line_length": 116, "num_lines": 14, "path": "/README.md", "repo_name": "mandrewd/pre-screen", "src_encoding": "UTF-8", "text": "This a quick fix of that code from the junior developer. We can `HSCAN` the hash, iterating over each user once.\n We check the hash value \n\nReally, we should store the `user_id` tokens in a redis `SET` for each day of the year, and then scan over each \n`user_id` in the set for the day we wish to process using the `SSCAN` operator. That solution would work like this:\n```python\nr = redis.Redis(host='localhost', port=6379, db=0)\ntoday = datetime.date.today()\nmonth = today.month\nday = today.day\ncollections.deque(map(sendBirthdayEmail,r.sscan_iter(f\"{month}-{day}\")))\n```\n\nBoth `SSCAN` and `HSCAN` will run in `O(n)` time, where `n` is the number of items in the set or hash." } ]
2
JaishreeJanu/Udacity-full-stack-developer-program
https://github.com/JaishreeJanu/Udacity-full-stack-developer-program
d38fcfae73491479ee702dfd5e76f8bcefed0b1d
309f8efc5622876dbe4060e8696ba35f5434243f
995ca915a08c5241f84c6ece57f20d3d01c92a58
refs/heads/master
2021-01-06T00:38:16.298670
2020-10-23T16:24:49
2020-10-23T16:24:49
241,179,129
3
1
null
null
null
null
null
[ { "alpha_fraction": 0.6327828764915466, "alphanum_fraction": 0.6497021913528442, "avg_line_length": 24.74216079711914, "blob_id": "d766e7b00a444bc5427c7f44604a44a3f3654dea", "content_id": "eac2f2c4c9c58c4d946c32559ca7a0185a3333a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7388, "license_type": "no_license", "max_line_length": 154, "num_lines": 287, "path": "/capstone project/README.md", "repo_name": "JaishreeJanu/Udacity-full-stack-developer-program", "src_encoding": "UTF-8", "text": "## Casting Agency\n\n\nFollowing topics, libraries and skills :\n1. SQL and database modeling for the web ```(SQL, flask-sqlalchemy, flask-migrate)```\n2. API development,testing endpoints and documentation ```(flask, unittest)```\n3. Identity and access management ```(Auth0)```\n4. Server deployment, Containerization and testing ```(docker and kubernetes)```\n\n\nThis app has been deployed on heroku:\n[URL of this app](https://final-capstone-udacity.herokuapp.com/actors)\n\n\n## Project dependencies, local development and hosting instructions\n\n1. First cd into the project folder\n2. Install [python](https://www.python.org/downloads/) and [postgres](https://www.postgresql.org/download/).\n3. Initialize a virtual environment:\n```\n$ virtualenv --no-site-packages env\n$ source env/scripts/activate\n``` \n4. Install the dependencies:\n```\npip3 install requirements.txt\n```\n5. Setup database in ```models.py ```:\n```\ndatabase_path = \"postgres://{}:{}@{}/{}\".format(<user-name>,'<password>','localhost:5432', <database_name>)\n```\n\n6. Setup Auth0:\n - create an account on auth0\n - Create an application <casting-agency>\n - Create an API <castings>\n - Define permissions in API. Following permissions defined :\n ``` \n read:actors, read:movies, add:actor, add:movie, modify:actor, modify:movie, delete:actor, delete:movie\n ```\n - Define role: Casting assistant, casting director and casting producer \n - Give permissions to the roles\n\n7. ```export FLASK_APP=app.py\n\texport FLASK_ENV=development```\n8. Now start local development server\n```flask run ```\n\n9. All endpoints written in ```app.py```, models in ```models.py```, config variable in ```config.py``` and all dependencies are in ```requirements.txt```\n10. To tun the ```test_app.py``` file, execute ```python3 test_app.py```.\n\n## API documentation and RBAC controls\n\nThe roles and their permissions have been explained here:\n1. Casting assistant has following permissions:\n- GET /actors: Can view all actors\n- GET /movies: Can view all movies\n\n2. Casting diresctor has all permissions of assistant and other permissions as well:\n- PATCH /actors/<actor-id>: Can modify an actor\n- PATCH /movies/<movies-id>:Can modify a movie\n- DELETE /actors/<actor-id>: Can delete an actor\n- POST /actors: Can add a new actor\n\n3. 
Casting producer has the following additional permissions:\n- POST /movies: Can add a movie\n- DELETE /movies/<movie-id>: Can delete a movie\n\nAll the endpoints and routes of this app are explained here:\n\n### GET /actors\n- Returns a list of all actors and their details: name, age and gender.\n- Send the following request with an ```Authorization header``` (it must contain the ```read:actors``` permission).\n\n- Casting assistant, casting director and casting producer have the permission to get actors.\n ```\n https://final-capstone-udacity.herokuapp.com/actors?page=1\n ```\n- Gives the following response:\n```\n{\n \"actors\": [\n {\n \"age\": 40,\n \"gender\": \"Female\",\n \"id\": 1,\n \"name\": \"Gisele Budchen\"\n },\n {\n \"age\": 60,\n \"gender\": \"Male\",\n \"id\": 3,\n \"name\": \"Alpachinno\"\n }\n ],\n \"success\": true\n}\n```\n\n### GET /movies\n- Returns a list of all movies and their details: title and release_date.\n- Send the following request with an ```Authorization header``` (it must contain the ```read:movies``` permission).\n\n- Casting assistant, casting director and casting producer have the permission to get movies.\n ```\n https://final-capstone-udacity.herokuapp.com/movies?page=1\n ```\n- Gives the following response:\n```\n{\n \"movies\": [\n {\n \"id\": 2,\n \"release_date\": \"Tue, 12 Oct 2021 00:00:00 GMT\",\n \"title\": \"83\"\n },\n {\n \"id\": 3,\n \"release_date\": \"Tue, 12 Oct 2021 00:00:00 GMT\",\n \"title\": \"Chapaak\"\n },\n {\n \"id\": 4,\n \"release_date\": \"Tue, 12 Oct 2021 00:00:00 GMT\",\n \"title\": \"Mahabhartha\"\n },\n {\n \"id\": 1,\n \"release_date\": \"Sat, 29 Feb 2020 00:00:00 GMT\",\n \"title\": \"Based on gernder roles\"\n }\n ],\n \"success\": true\n}\n```\n### POST /actors\n- Adds a new actor to the database and returns success and the id of the newly created record.\n- Send the following JSON in the body:\n```\n{\n\t\"name\":\"Anny Hathway\",\n\t\"age\":32,\n\t\"gender\": \"Female\"\n\t\n}\n```\n- Also send a token which has the ```add:actor``` permission. Casting director and Casting producer have the permission to do so.\n- Send a POST request to this URL:\n```\nhttps://final-capstone-udacity.herokuapp.com/actors\n```\n- It gives this response:\n```\n{\n \"actor\": 4,\n \"success\": true\n}\n```\n- If the request is sent without the required permissions, it gives this response:\n```\n{\n \"error\": 403,\n \"message\": \"Permission not found.\",\n \"success\": false\n}\n```\n\n### POST /movies\n- Adds a new movie to the database and returns success and the id of the newly created record.\n- Send the following JSON in the body:\n```\n{\n\t\"title\":\"Safe heaven\",\n\t\"release_date\": \"Fri, 24 Apr 2021 00:00:00 GMT\"\n\t\n}\n```\n- Also send a token which has the ```add:movie``` permission. 
Only the Casting producer has the permission to do so.\n- Send a POST request to this URL:\n```\nhttps://final-capstone-udacity.herokuapp.com/movies\n```\n- It gives this response:\n```\n{\n \"movie\": 5,\n \"success\": true\n}\n```\n- If the request is sent without the required permissions, it gives this response:\n```\n{\n \"error\": 403,\n \"message\": \"Permission not found.\",\n \"success\": false\n}\n```\n### PATCH /actors/<int:actor_id>\n- Updates the actor with the given id and returns a success message with the id of the modified actor.\n- The Authorization header must have the ```modify:actor``` permission.\n- Send a PATCH request to:\n```\nhttps://final-capstone-udacity.herokuapp.com/actors/3\n```\n- Send the request with a JSON body:\n```\n{\n\t\"name\":\"Ranveer singh\",\n\t\"gender\":\"Male\"\n}\n```\n- Returns the following response:\n```\n{\n \"actor\": 3,\n \"success\": true\n}\n```\n- If the request is sent without the required ```modify:actor``` permission, it gives the following response:\n```\n{\n \"error\": 403,\n \"message\": \"Permission not found.\",\n \"success\": false\n}\n```\n\n### PATCH /movies/<int:movie_id>\n- Updates the movie with the given id and returns a success message with the id of the modified movie.\n- The Authorization header must have the ```modify:movie``` permission.\n- Send a PATCH request to:\n```\nhttps://final-capstone-udacity.herokuapp.com/movies/1\n```\n- With a JSON body:\n```\n{\n \"title\": \"Draupadi\"\n}\n```\n- Returns this response:\n```\n{\n \"movie\": 1,\n \"success\": true\n}\n```\n- If the request is sent without the required ```modify:movie``` permission, it gives the following response:\n```\n{\n \"error\": 403,\n \"message\": \"Permission not found.\",\n \"success\": false\n}\n```\n\n### DELETE /actors/<actor_id>\n- Deletes the given actor record and returns a success message with the id of the deleted actor.\n- The Authorization header should have the ```delete:actor``` permission.\n- Send a DELETE request to this URL:\n```\nhttps://final-capstone-udacity.herokuapp.com/actors/1\n```\n- Returns this response:\n```\n{\n \"actor\": 1,\n \"success\": true\n}\n```\n\n### DELETE /movies/<movie_id>\n- Deletes the given movie record and returns a success message with the id of the deleted movie.\n- The Authorization header should have the ```delete:movie``` permission.\n- Send a DELETE request to this URL:\n```\nhttps://final-capstone-udacity.herokuapp.com/movies/1\n```\n- Returns this response:\n```\n{\n \"movie\": 1,\n \"success\": true\n}\n```\n" }, { "alpha_fraction": 0.7870370149612427, "alphanum_fraction": 0.7870370149612427, "avg_line_length": 35, "blob_id": "3292336ef9ef2a14d0ab86b57adda4fa258bd93e", "content_id": "da7825e12c74cf22644c8a301f96882fc4a01f14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 108, "license_type": "no_license", "max_line_length": 72, "num_lines": 3, "path": "/deploy flask app on kubernetes/README.md", "repo_name": "JaishreeJanu/Udacity-full-stack-developer-program", "src_encoding": "UTF-8", "text": "## Deploy Flask app on kubernetes\n\n[Project link](https://github.com/JaishreeJanu/deployment-on-kubernetes)\n" }, { "alpha_fraction": 0.616368293762207, "alphanum_fraction": 0.6368286609649658, "avg_line_length": 21.72222137451172, "blob_id": "9c8bb61005ca89e3855e80a477589b29fa06719c", "content_id": "845eeb527db4dbcbd31e9a4356bddeba479412e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 391, "license_type": "no_license", "max_line_length": 52, "num_lines": 18, "path": "/capstone project/starter/config.py", 
"repo_name": "JaishreeJanu/Udacity-full-stack-developer-program", "src_encoding": "UTF-8", "text": "import os\n\nSECRET_KEY = os.urandom(32)\n\n# Grabs the folder where the script runs.\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nauth0_config = {\n \"AUTH0_DOMAIN\": \"auth-fsnd.eu.auth0.com\",\n \"ALGORITHMS\": [\"RS256\"],\n \"API_AUDIENCE\": \"castings\",\n}\n\nbearer_tokens = {\n \"casting_assistant\": \"Bearer \",\n \"casting_director\": \"Bearer \",\n \"executive_producer\": \"Bearer \",\n}\n" }, { "alpha_fraction": 0.5332596898078918, "alphanum_fraction": 0.5454099178314209, "avg_line_length": 29.51685333251953, "blob_id": "8d8d04bdfc54680cfbbcd97a14c98cd063c63422", "content_id": "551fd1a4164b16e20baa367a33bf30467ca91f93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8148, "license_type": "no_license", "max_line_length": 78, "num_lines": 267, "path": "/capstone project/starter/app.py", "repo_name": "JaishreeJanu/Udacity-full-stack-developer-program", "src_encoding": "UTF-8", "text": "import os\nfrom flask import Flask, request, abort, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nfrom auth import AuthError, requires_auth\nfrom models import setup_db, db_insert_records, Actor, Movie\n\nQUESTIONS_PER_PAGE = 10\n\n\ndef paginate(request, results):\n \"\"\"return results in group of 10\n 10 set of questions shown on each page\n Arguments:\n request {json} -- json body woth page parameter\n results {list} -- results\n \n Returns:\n list -- questions on current page\n \"\"\"\n page = request.args.get(\"page\", 1, type=int)\n start = (page - 1) * QUESTIONS_PER_PAGE\n end = start + QUESTIONS_PER_PAGE\n\n all_results = [result.format() for result in results]\n paginated_results = all_results[start:end]\n return paginated_results\n\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__)\n setup_db(app)\n CORS(app)\n\n @app.after_request\n def after_request(response):\n response.headers.add(\n \"Access-Control-Allow-Headers\", \"Content-Type,Authorization\"\n )\n response.headers.add(\n \"Access-Control-Allow-Methods\", \"GET,PATCH,POST,DELETE,OPTIONS\"\n )\n return response\n\n @app.route(\"/actors\", methods=[\"GET\"])\n @requires_auth(\"read:actors\")\n def get_actors(token):\n \"\"\"gets all actors\n if token has 'read:actors' permission\n Returns:\n json -- success value and list of actors\n \"\"\"\n all_actors = Actor.query.all()\n actors = paginate(request, all_actors)\n\n if len(actors) == 0:\n abort(404, {\"message\": \"OOPS! No actors willing to work\"})\n\n return jsonify({\"success\": True, \"actors\": actors}), 200\n\n @app.route(\"/movies\", methods=[\"GET\"])\n @requires_auth(\"read:movies\")\n def get_movies(token):\n \"\"\"get all movies\n if token has 'read:movies' permission\n Returns:\n json -- success value and list of movies\n \"\"\"\n all_movies = Movie.query.all()\n movies = paginate(request, all_movies)\n\n if len(movies) == 0:\n abort(404, {\"message\": \"OOPS! 
No one is making movies\"})\n\n return jsonify({\"success\": True, \"movies\": movies}), 200\n\n @app.route(\"/actors\", methods=[\"POST\"])\n @requires_auth(\"add:actor\")\n def add_actor(token):\n \"\"\"Adds a new record in actor db table\n if token has 'add:actor' permission\n Returns:\n json -- success value and id of new record\n \"\"\"\n data = request.get_json()\n if not data.get(\"name\"):\n abort(400, {\"message\": 'Please add \"name\" in the json'})\n\n name = data.get(\"name\")\n age = data.get(\"age\")\n gender = data.get(\"gender\")\n new_actor = Actor(name=name, age=age, gender=gender)\n new_actor.insert()\n\n return jsonify({\"success\": True, \"actor\": new_actor.id}), 200\n\n @app.route(\"/movies\", methods=[\"POST\"])\n @requires_auth(\"add:movie\")\n def add_movie(token):\n \"\"\"Adds a new record in movie db table\n if token contains 'add:movie' permission\n Returns:\n json -- success value and id of new record\n \"\"\"\n data = request.get_json()\n if not data:\n abort(400, {\"message\": \"there is no json body\"})\n\n title = data.get(\"title\")\n release_date = data.get(\"release_date\")\n\n new_movie = Movie(title=title, release_date=release_date)\n new_movie.insert()\n\n return jsonify({\"success\": True, \"movie\": new_movie.id}), 200\n\n @app.route(\"/actors/<int:actor_id>\", methods=[\"PATCH\"])\n @requires_auth(\"modify:actor\")\n def modify_actor(token, actor_id):\n \"\"\"modifies the actor details with the actor_id,\n token must contain 'modify:actor' permission\n Arguments:\n actor_id {int}: actor id\n Returns:\n json -- success value and id of updated record\n \"\"\"\n data = request.get_json()\n if not data:\n abort(400, {\"message\": \"there is no json body\"})\n this_actor = Actor.query.get(actor_id)\n\n if not this_actor:\n abort(404, {\"message\": \"No actor with this id\"})\n new_name = data.get(\"name\")\n new_age = data.get(\"age\")\n new_gender = data.get(\"gender\")\n\n if new_name:\n this_actor.name = new_name\n if new_age:\n this_actor.age = new_age\n if new_gender:\n this_actor.gender = new_gender\n\n this_actor.update()\n return jsonify({\"success\": True, \"actor\": this_actor.id}), 200\n\n @app.route(\"/movies/<int:movie_id>\", methods=[\"PATCH\"])\n @requires_auth(\"modify:movie\")\n def modify_movie(token, movie_id):\n \"\"\"modifies the movie details with the movie_id,\n token must contain 'modify:movie' permission\n Arguments:\n movie_id {int}: movie id\n Returns:\n json -- success value and id of updated record\n \"\"\"\n data = request.get_json()\n if not data:\n abort(400, {\"message\": \"there is no json body\"})\n this_movie = Movie.query.get(movie_id)\n\n if not this_movie:\n abort(404, {\"message\": \"No movie with this id\"})\n\n if data.get(\"title\"):\n new_title = data.get(\"title\")\n this_movie.title = new_title\n if data.get(\"release_date\"):\n new_release_date = data.get(\"release_date\")\n this_movie.new_release_date = new_release_date\n\n this_movie.update()\n return jsonify({\"success\": True, \"movie\": this_movie.id}), 200\n\n @app.route(\"/actors/<int:actor_id>\", methods=[\"DELETE\"])\n @requires_auth(\"delete:actor\")\n def delete_actor(token, actor_id):\n \"\"\"deletes actor with actor_id,\n should contain 'delete:actor' permission\n Arguments:\n actor_id {int}: actor id\n Returns:\n json -- success value and id of deleted record\n \"\"\"\n actor = Actor.query.get(actor_id)\n if not actor:\n abort(404, {\"message\": \"No actor with this id\"})\n actor.delete()\n\n return jsonify({\"success\": True, \"actor\": 
actor.id}), 200\n\n @app.route(\"/movies/<int:movie_id>\", methods=[\"DELETE\"])\n @requires_auth(\"delete:movie\")\n def delete_movie(token, movie_id):\n \"\"\"deletes movie with movie_id,\n should contain 'delete:movie' permission\n Arguments:\n movie_id {int}: movie id\n Returns:\n json -- success value and id of deleted record\n \"\"\"\n movie = Movie.query.get(movie_id)\n if not movie:\n abort(404, {\"message\": \"No movie with this id\"})\n movie.delete()\n\n return jsonify({\"success\": True, \"movie\": movie.id}), 200\n\n @app.errorhandler(400)\n def bad_request(error):\n return (\n jsonify(\n {\n \"success\": False,\n \"error\": 400,\n \"message\": error.description[\"message\"],\n }\n ),\n 400,\n )\n\n @app.errorhandler(404)\n def resource_not_found(error):\n return (\n jsonify(\n {\n \"success\": False,\n \"error\": 404,\n \"message\": error.description[\"message\"],\n }\n ),\n 404,\n )\n\n @app.errorhandler(422)\n def unprocessable(error):\n return (\n jsonify(\n {\n \"success\": False, \"error\": 422, \"message\": \"unprocessable\"\n }\n ),\n 422,\n )\n\n @app.errorhandler(AuthError)\n def authentification_error(AuthError):\n return (\n jsonify(\n {\n \"success\": False,\n \"error\": AuthError.status_code,\n \"message\": AuthError.error[\"description\"],\n }\n ),\n AuthError.status_code,\n )\n\n return app\n\n\napp = create_app()\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=8080, debug=True)\n" }, { "alpha_fraction": 0.5976896286010742, "alphanum_fraction": 0.6099112629890442, "avg_line_length": 33.92982482910156, "blob_id": "cbc29cd0797d4e9300099f2edc3acc436a5dadcd", "content_id": "d9ffa63b55a0ddcc7e1dc5730b30a807e0db5594", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5973, "license_type": "no_license", "max_line_length": 91, "num_lines": 171, "path": "/trivia_api/starter/backend/test_flaskr.py", "repo_name": "JaishreeJanu/Udacity-full-stack-developer-program", "src_encoding": "UTF-8", "text": "import os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom flaskr import create_app\nfrom models import setup_db, Question, Category\n\n\nclass TriviaTestCase(unittest.TestCase):\n \"\"\"This class represents the trivia test case\"\"\"\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n # self.database_name = \"trivia_test\"\n self.database_path = \"postgres://{}:{}@{}/{}\".format(\n \"jaishree\", \"password\", \"localhost:5432\", \"trivia_test\"\n )\n setup_db(self.app, self.database_path)\n\n self.headers = {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n }\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()\n\n self.new_question = {\n \"question\": \"Which state of India is known as heaven on earth\",\n \"answer\": \"Kashmir\",\n \"difficulty\": 3,\n \"category\": 3,\n }\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n \"\"\"\n Write at least one test for each test for successful operation and for expected errors.\n \"\"\"\n\n def test_get_categories(self):\n res = self.client().get(\"/categories\")\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[\"success\"], True)\n self.assertTrue(isinstance(data[\"categories\"], dict))\n\n def 
test_get_paginated_questions(self):\n res = self.client().get(\"/questions/page=1\")\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[\"success\"], True)\n self.assertTrue(isinstance(data[\"categories\"], dict))\n self.assertTrue(isinstance(data[\"questions\"], list))\n self.assertEqual(data[\"total_questions\"], 10)\n\n def test_get_questions_404_error(self):\n res = self.client().get(\"/questions/page=50\")\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data[\"message\"], \"resource not found\")\n self.assertEqual(data[\"success\"], False)\n\n def test_delete_question(self):\n res = self.client().delete(\"/questions/28\")\n data = json.loads(res.data)\n\n question = Question.query.filter(Question.id == 28).one_or_none()\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[\"success\"], True)\n self.assertEqual(question, None)\n\n def test_delete_question_404_error(self):\n res = self.client().delete(\"/questions/17\")\n data = json.loads(res.data)\n\n self.assertEqual(data[\"message\"], \"resource not found\")\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data[\"success\"], False)\n\n def test_post_question_success(self):\n res = self.client().post(\"/questions\", json=self.new_question)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[\"success\"], True)\n self.assertEqual(data[\"question\"], self.new_question[\"question\"])\n self.assertEqual(data[\"answer\"], self.new_question[\"answer\"])\n self.assertEqual(data[\"category\"], self.new_question[\"category\"])\n self.assertEqual(data[\"difficulty\"], self.new_question[\"difficulty\"])\n\n def test_post_question_failure(self):\n insert_question = {\n \"question\": \"new_question\",\n \"answer\": \"\",\n \"category\": 2,\n \"difficulty\": 3,\n }\n res = self.client().post(\"/questions\", json=insert_question)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data[\"success\"], False)\n self.assertEqual(data[\"message\"], \"bad request\")\n\n def test_question_search_success(self):\n res = self.client().post(\"/search\", json={\"searchTerm\": \"mirror\"})\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[\"success\"], True)\n self.assertTrue(isinstance(data[\"questions\"], list))\n self.assertEqual(len(data[\"questions\"]), 1)\n\n def test_question_search_failure(self):\n res = self.client().post(\"/search\", json={\"searchTerm\": \"orange\"})\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data[\"success\"], False)\n self.assertEqual(data[\"message\"], \"resource not found\")\n\n def test_get_category_question_success(self):\n res = self.client().get(\"/categories/2/questions\")\n data = json.loads(res.data)\n\n num_questions = Question.query.filter_by(category=2).count()\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[\"success\"], True)\n self.assertEqual(data[\"total_questions\"], num_questions)\n self.assertTrue(isinstance(data[\"questions\"], list))\n self.assertEqual(data[\"current_category\"], 2)\n\n def test_get_category_question_failure(self):\n res = self.client().get(\"/categories/8/questions\")\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data[\"success\"], False)\n self.assertEqual(data[\"message\"], \"resource not found\")\n\n def test_play_quiz(self):\n res = 
self.client().post(\n \"/quizzes\",\n json={\"previous_questions\": [1, 2, 3], \"quiz_category\": {\"id\": 2}},\n )\n data = json.loads(res.data)\n\n question = data[\"question\"]\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[\"success\"], True)\n self.assertTrue(question[\"id\"] not in [1, 2, 3])\n\n\n# Make the tests conveniently executable\nif __name__ == \"__main__\":\n unittest.main()\n" }, { "alpha_fraction": 0.539680004119873, "alphanum_fraction": 0.5414693355560303, "avg_line_length": 30.510780334472656, "blob_id": "3c76b32cf81254df39291d5476cd9cae190e4106", "content_id": "1345d4163d03d2f789056f086368735d58d3dcac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19002, "license_type": "no_license", "max_line_length": 88, "num_lines": 603, "path": "/fyyur-music-venue-booking-site/app.py", "repo_name": "JaishreeJanu/Udacity-full-stack-developer-program", "src_encoding": "UTF-8", "text": "# ----------------------------------------------------------------------------#\n# Imports\n# ----------------------------------------------------------------------------#\n\nimport json\nimport dateutil.parser\nimport babel\nfrom flask import (\n Flask,\n render_template,\n request,\n Response,\n flash,\n redirect,\n url_for,\n abort,\n)\nfrom flask_moment import Moment\nimport logging\nfrom logging import Formatter, FileHandler\nfrom flask_wtf import FlaskForm\nfrom forms import *\nfrom models import setup_db, Musicshows, Venue, Artist\n\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom sqlalchemy import func\nimport sys\n\n# ----------------------------------------------------------------------------#\n# App Config.\n# ----------------------------------------------------------------------------#\n\napp = Flask(__name__)\nmoment = Moment(app)\ndb = setup_db(app)\n\n# ----------------------------------------------------------------------------#\n# Filters.\n# ----------------------------------------------------------------------------#\n\n\ndef format_datetime(value, format=\"medium\"):\n date = dateutil.parser.parse(value)\n if format == \"full\":\n format = \"EEEE MMMM, d, y 'at' h:mma\"\n elif format == \"medium\":\n format = \"EE MM, dd, y h:mma\"\n return babel.dates.format_datetime(date, format)\n\n\napp.jinja_env.filters[\"datetime\"] = format_datetime\n\n\n# ----------------------------------------------------------------------------#\n# Controllers.\n# ----------------------------------------------------------------------------#\n\n\[email protected](\"/\")\ndef index():\n return render_template(\"pages/home.html\")\n\n\n# Venues\n# ----------------------------------------------------------------\n\n\[email protected](\"/venues\")\ndef venues():\n \"\"\"Returns venues (grouped by city and state)\n along with number of upcoming shows in each (city, state).\n Returns:\n list of dictionary -- venues\n \"\"\"\n current_time = datetime.now()\n data = []\n venue_city_state = (\n db.session.query(Venue.city, Venue.state)\n .group_by(Venue.city, Venue.state)\n .all()\n )\n i = 0\n\n for v in venue_city_state:\n venue_data = (\n db.session.query(Venue)\n .filter(Venue.city == v.city, Venue.state == v.state)\n .all()\n )\n data.append({\"city\": v.city, \"state\": v.state, \"venues\": []})\n for each_venue in venue_data:\n upcoming_shows = (\n db.session.query(Musicshows)\n .filter(\n Musicshows.venue_id == each_venue.id,\n Musicshows.start_time > current_time,\n )\n .count()\n )\n data[i][\"venues\"].append(\n {\n \"id\": 
each_venue.id,\n \"name\": each_venue.name,\n \"num_upcoming_shows\": upcoming_shows,\n }\n )\n i += 1\n\n return render_template(\"pages/venues.html\", areas=data)\n\n\[email protected](\"/venues/search\", methods=[\"POST\"])\ndef search_venues():\n \"\"\"Returns venue results filtered by the search term.\n 'search_term' is received in JSON body here.\n Returns:\n dictionary -- venues\n \"\"\"\n search_term = request.form[\"search_term\"]\n search_results = db.session.query(Venue).filter(\n Venue.name.ilike(\"%\" + search_term + \"%\")\n )\n\n response = {\"count\": search_results.count(), \"data\": []}\n for result in search_results:\n num_upcoming_shows = (\n db.session.query(Musicshows)\n .filter(\n Musicshows.venue_id == result.id, Musicshows.start_time > datetime.now()\n )\n .count()\n )\n response[\"data\"].append(\n {\n \"id\": result.id,\n \"name\": result.name,\n \"num_upcoming_shows\": num_upcoming_shows,\n }\n )\n return render_template(\n \"pages/search_venues.html\",\n results=response,\n search_term=request.form.get(\"search_term\", \"\"),\n )\n\n\[email protected](\"/venues/<int:venue_id>\")\ndef show_venue(venue_id):\n \"\"\"returns details of venue with venue_id.\n Arguments:\n venue_id {int} -- venue id\n \n Returns:\n dictionary -- details of venue\n \"\"\"\n data = Venue.query.get(venue_id)\n venue_data = {}\n current_time = datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\")\n\n if data:\n past_results = (\n db.session.query(Musicshows)\n .filter(Musicshows.venue_id == venue_id)\n .filter(Musicshows.start_time < current_time)\n .all()\n )\n upcoming_results = (\n db.session.query(Musicshows)\n .filter(Musicshows.venue_id == venue_id)\n .filter(Musicshows.start_time > current_time)\n .all()\n )\n\n venue_data = Venue.details(data)\n venue_data[\"past_shows\"] = list(map(Musicshows.artist_details, past_results))\n venue_data[\"upcoming_shows\"] = list(\n map(Musicshows.artist_details, upcoming_results)\n )\n venue_data[\"past_shows_count\"] = len(past_results)\n venue_data[\"upcoming_shows_count\"] = len(upcoming_results)\n\n return render_template(\"pages/show_venue.html\", venue=venue_data)\n\n\n# Create Venue\n# ----------------------------------------------------------------\n\n\[email protected](\"/venues/create\", methods=[\"GET\"])\ndef create_venue_form():\n \"\"\"creates a venue form\n Returns:\n form -- form fields of different data types\n \"\"\"\n form = VenueForm()\n return render_template(\"forms/new_venue.html\", form=form)\n\n\[email protected](\"/venues/create\", methods=[\"POST\"])\ndef create_venue_submission():\n \"\"\"insert form data as a new venue record\n \"\"\"\n form = VenueForm(request.form)\n if form.is_submitted():\n print(\"Form successfully submitted\")\n\n if form.validate_on_submit():\n try:\n new_venue = Venue(\n name=request.form[\"name\"],\n city=request.form[\"city\"],\n state=request.form[\"state\"],\n address=request.form[\"address\"],\n phone=request.form[\"phone\"],\n facebook_link=request.form[\"facebook_link\"],\n genres=request.form.getlist(\"genres\"),\n image_link=request.form[\"image_link\"],\n website_link=request.form[\"website_link\"],\n seeking_talent=bool(request.form[\"seeking_talent\"]),\n seeking_description=request.form[\"seeking_description\"],\n )\n\n Venue.insert(new_venue)\n flash(\"Venue \" + request.form[\"name\"] + \" was successfully listed!\")\n\n except SQLAlchemyError as e:\n db.session.rollback()\n print(e)\n flash(\"OOPS!! 
Venue \" + request.form[\"name\"] + \" was not listed!\")\n finally:\n db.session.close()\n\n return render_template(\"pages/home.html\")\n\n\[email protected](\"/venues/<venue_id>\", methods=[\"DELETE\"])\ndef delete_venue(venue_id):\n \"\"\"deletes a record from venue model\n Arguments:\n venue_id {int} -- venue id\n \"\"\"\n venue_item = Venue.query.get(venue_id)\n try:\n Venue.delete(venue_item)\n flash(\"Venue item deleted successfully!\")\n except:\n db.session.rollback()\n flash(\"Venue item deletion failed!\")\n finally:\n db.session.close()\n return None\n\n\n# Artists\n# ----------------------------------------------------------------\[email protected](\"/artists\")\ndef artists():\n \"\"\"returns artist names\n Returns:\n list -- artists\n \"\"\"\n data = db.session.query(Artist.id, Artist.name).all()\n return render_template(\"pages/artists.html\", artists=data)\n\n\[email protected](\"/artists/search\", methods=[\"POST\"])\ndef search_artists():\n \"\"\"returns search results of artists\n Returns:\n dictionary -- artists\n \"\"\"\n\n search_term = request.form[\"search_term\"]\n search_results = db.session.query(Artist).filter(\n Artist.name.ilike(\"%\" + search_term + \"%\")\n )\n\n response = {\"count\": search_results.count(), \"data\": []}\n for result in search_results:\n num_upcoming_shows = (\n db.session.query(Musicshows)\n .filter(\n Musicshows.artist_id == result.id,\n Musicshows.start_time > datetime.now(),\n )\n .count()\n )\n response[\"data\"].append(\n {\n \"id\": result.id,\n \"name\": result.name,\n \"num_upcoming_shows\": num_upcoming_shows,\n }\n )\n\n return render_template(\n \"pages/search_artists.html\",\n results=response,\n search_term=request.form.get(\"search_term\", \"\"),\n )\n\n\[email protected](\"/artists/<int:artist_id>\")\ndef show_artist(artist_id):\n \"\"\"shows artist page with artist details\n Arguments:\n artist_id {int} -- artist id\n Returns:\n dictionary -- artist details\n \"\"\"\n data = Artist.query.get(artist_id)\n artist_data = {}\n current_time = datetime.now()\n\n if data:\n past_results = (\n db.session.query(Musicshows)\n .filter(Musicshows.artist_id == artist_id)\n .filter(Musicshows.start_time < current_time)\n .all()\n )\n upcoming_results = (\n db.session.query(Musicshows)\n .filter(Musicshows.artist_id == artist_id)\n .filter(Musicshows.start_time > current_time)\n .all()\n )\n\n artist_data = Artist.details(data)\n artist_data[\"past_shows\"] = list(map(Musicshows.venue_details, past_results))\n artist_data[\"upcoming_shows\"] = list(\n map(Musicshows.venue_details, upcoming_results)\n )\n artist_data[\"past_shows_count\"] = len(past_results)\n artist_data[\"upcoming_shows_count\"] = len(upcoming_results)\n\n return render_template(\"pages/show_artist.html\", artist=data)\n\n\n# Update\n# ----------------------------------------------------------------\[email protected](\"/artists/<int:artist_id>/edit\", methods=[\"GET\"])\ndef edit_artist(artist_id):\n \"\"\" populate form with fields from artist with artist_id and\n return the form for editing artist details.\n Arguments:\n artist_id {int} -- artist id\n Returns:\n form -- form fields\n \"\"\"\n form = ArtistForm()\n\n artist = Artist.query.get(artist_id)\n if artist:\n form.name.data = artist.name\n form.genres.data = artist.genres\n form.city.data = artist.city\n form.state.data = artist.state\n form.phone.data = artist.phone\n form.website_link.data = artist.website_link\n form.image_link.data = artist.image_link\n form.facebook_link.data = artist.facebook_link\n 
form.seeking_venue.data = artist.seeking_venue\n form.seeking_description.data = artist.seeking_description\n return render_template(\"forms/edit_artist.html\", form=form, artist=artist)\n\n\[email protected](\"/artists/<int:artist_id>/edit\", methods=[\"POST\"])\ndef edit_artist_submission(artist_id):\n \"\"\"take values from the form submitted and update\n existing artist record with artist_id\n Arguments:\n artist_id {int} -- artist id\n Returns:\n [int] -- artist id\n \"\"\"\n\n form = ArtistForm(request.form)\n artist = Artist.query.get(artist_id)\n if artist:\n if form.is_submitted():\n print(\"Artist edit Form successfully submitted\")\n if form.validate():\n print(\"Form validated\")\n try:\n setattr(artist, \"name\", request.form[\"name\"])\n setattr(artist, \"genres\", request.form.getlist(\"genres\"))\n setattr(artist, \"city\", request.form[\"city\"])\n setattr(artist, \"state\", request.form[\"state\"])\n setattr(artist, \"phone\", request.form[\"phone\"])\n setattr(artist, \"website_link\", request.form[\"website_link\"])\n setattr(artist, \"facebook_link\", request.form[\"facebook_link\"])\n setattr(artist, \"image_link\", request.form[\"image_link\"])\n setattr(\n artist, \"seeking_description\", request.form[\"seeking_description\"]\n )\n setattr(artist, \"seeking_venue\", bool(request.form[\"seeking_venue\"]))\n Artist.update(artist)\n flash(\"Edited Successfully\")\n except SQLAlchemyError as e:\n flash(\"Edit failed!!\")\n print(e)\n return render_template(\"errors/404.html\")\n return redirect(url_for(\"show_artist\", artist_id=artist_id))\n\n\[email protected](\"/venues/<int:venue_id>/edit\", methods=[\"GET\"])\ndef edit_venue(venue_id):\n \"\"\" populate form with fields from venue with venue_id and\n return the form for editing venue details.\n Arguments:\n venue_id {int} -- venue id\n Returns:\n form -- form fields\n \"\"\"\n form = VenueForm()\n venue = Venue.query.get(venue_id)\n if venue:\n form.name.data = venue.name\n form.genres.data = venue.genres\n form.address.data = venue.address\n form.city.data = venue.city\n form.state.data = venue.state\n form.phone.data = venue.phone\n form.website_link.data = venue.website_link\n form.facebook_link.data = venue.facebook_link\n form.seeking_talent.data = venue.seeking_talent\n form.seeking_description.data = venue.seeking_description\n form.image_link.data = venue.image_link\n return render_template(\"forms/edit_venue.html\", form=form, venue=venue)\n\n\[email protected](\"/venues/<int:venue_id>/edit\", methods=[\"POST\"])\ndef edit_venue_submission(venue_id):\n \"\"\"take values from the form submitted and update\n existing venue record with venue_id\n Arguments:\n venue_id {int} -- venue id\n Returns:\n [int] -- venue id\n \"\"\"\n\n form = VenueForm(request.form)\n venue = Venue.query.get(venue_id)\n if venue:\n if form.validate():\n print(\"Form validated\")\n try:\n setattr(venue, \"name\", request.form[\"name\"])\n setattr(venue, \"genres\", request.form.getlist(\"genres\"))\n setattr(venue, \"city\", request.form[\"city\"])\n setattr(venue, \"address\", request.form[\"address\"])\n setattr(venue, \"state\", request.form[\"state\"])\n setattr(venue, \"phone\", request.form[\"phone\"])\n setattr(venue, \"facebook_link\", request.form[\"facebook_link\"])\n setattr(venue, \"website_link\", request.form[\"website_link\"])\n setattr(venue, \"image_link\", request.form[\"image_link\"])\n setattr(venue, \"seeking_talent\", bool(request.form[\"seeking_talent\"]))\n setattr(\n venue, \"seeking_description\", 
request.form[\"seeking_description\"]\n )\n Venue.update(venue)\n flash(\"Edited Successfully\")\n except SQLAlchemyError as e:\n flash(\"Edit failed!!\")\n print(e)\n return render_template(\"errors/404.html\")\n return redirect(url_for(\"show_venue\", venue_id=venue_id))\n\n\n# Create Artist\n# ----------------------------------------------------------------\n\n\[email protected](\"/artists/create\", methods=[\"GET\"])\ndef create_artist_form():\n \"\"\"gets artist form for creating new artist\n Returns:\n form -- artist form fields\n \"\"\"\n form = ArtistForm()\n return render_template(\"forms/new_artist.html\", form=form)\n\n\[email protected](\"/artists/create\", methods=[\"POST\"])\ndef create_artist_submission():\n \"\"\"insert form data as a new artist record in db \n \"\"\"\n form = ArtistForm(request.form)\n if form.is_submitted():\n print(\"Form successfully submitted\")\n\n if form.validate_on_submit():\n try:\n new_artist = Artist(\n name=request.form[\"name\"],\n city=request.form[\"city\"],\n state=request.form[\"state\"],\n phone=request.form[\"phone\"],\n facebook_link=request.form[\"facebook_link\"],\n genres=request.form.getlist(\"genres\"),\n image_link=request.form[\"image_link\"],\n website_link=request.form[\"website_link\"],\n seeking_venue=bool(request.form[\"seeking_venue\"]),\n seeking_description=request.form[\"seeking_description\"],\n )\n\n Artist.insert(new_artist)\n flash(\"Artist\" + request.form[\"name\"] + \" was successfully listed!\")\n\n except SQLAlchemyError as e:\n db.session.rollback()\n print(e)\n flash(\"OOPS!! Artist \" + request.form[\"name\"] + \" was not listed!\")\n finally:\n db.session.close()\n return render_template(\"pages/home.html\")\n\n\n# Shows\n# ----------------------------------------------------------------\n\n\[email protected](\"/shows\")\ndef shows():\n \"\"\"display list of shows \n Returns:\n dictionary -- show details\n \"\"\"\n\n shows_data = {}\n data = db.session.query(Musicshows).all()\n\n shows_data = Musicshows.details(data)\n\n return render_template(\"pages/shows.html\", shows=shows_data)\n\n\[email protected](\"/shows/create\")\ndef create_shows():\n \"\"\"renders form for registering a show.\n Returns:\n form -- show form fields\n \"\"\"\n form = ShowForm()\n return render_template(\"forms/new_show.html\", form=form)\n\n\[email protected](\"/shows/create\", methods=[\"POST\"])\ndef create_show_submission():\n \"\"\"called to create new show in db.\n upon submitting, insert form data as a new show\n \"\"\"\n form = ShowForm(request.form)\n if form.validate_on_submit():\n try:\n new_show = Musicshows(\n artist_id=request.form[\"artist_id\"],\n venue_id=request.form[\"venue_id\"],\n start_time=request.form[\"start_time\"],\n )\n Musicshows.insert(new_show)\n except SQLAlchemyError as e:\n db.session.rollback()\n print(e)\n flash(\"An error occurred. 
Show could not be listed.\")\n return render_template(\"pages/home.html\")\n\n\[email protected](404)\ndef not_found_error(error):\n return render_template(\"errors/404.html\"), 404\n\n\[email protected](500)\ndef server_error(error):\n return render_template(\"errors/500.html\"), 500\n\n\nif not app.debug:\n file_handler = FileHandler(\"error.log\")\n file_handler.setFormatter(\n Formatter(\"%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]\")\n )\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info(\"errors\")\n\n# ----------------------------------------------------------------------------#\n# Launch.\n# ----------------------------------------------------------------------------#\n\n# Default port:\nif __name__ == \"__main__\":\n app.run()\n\n# Or specify port manually:\n\"\"\"\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n\"\"\"\n\n" }, { "alpha_fraction": 0.6002724766731262, "alphanum_fraction": 0.6160358190536499, "avg_line_length": 34.93356704711914, "blob_id": "8dc9ce8cbb9178b84c5922c3ef56348d456c9c03", "content_id": "9a1849d8acf69cf2f56762127216e8011050831d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10277, "license_type": "no_license", "max_line_length": 89, "num_lines": 286, "path": "/capstone project/starter/test_app.py", "repo_name": "JaishreeJanu/Udacity-full-stack-developer-program", "src_encoding": "UTF-8", "text": "import os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\nfrom app import create_app\nfrom models import setup_db, Actor, Movie, db_insert_records\nfrom config import bearer_tokens\nfrom sqlalchemy import desc\nfrom datetime import date\n\nassistant_auth = {\n \"Authorization\": bearer_tokens[\"casting_assistant\"]\n }\ndirector_auth_header = {\n \"Authorization\": bearer_tokens[\"casting_director\"]\n }\nproducer_auth_header = {\n \"Authorization\": bearer_tokens[\"executive_producer\"]\n }\n\nnew_actor = {\"name\": \"ryan gosling\", \"age\": 35, \"gender\": \"Male\"}\nnew_movie = {\"title\": \"Lagaan\", \"release_date\": date.today()}\n\n\nclass CastingTestCase(unittest.TestCase):\n def setUp(self):\n\n self.app = create_app()\n self.client = self.app.test_client\n database_path = os.environ[\"DATABASE_URL\"]\n setup_db(self.app, database_path)\n # binds the app to the current context\n\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after each test\"\"\"\n pass\n\n # tests for POST /actors\n\n def test_add_actor(self):\n # add new actors: success scenario\n res = self.client().post(\n \"/actors\", json=new_actor, headers=director_auth_header\n )\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data[\"success\"])\n self.assertEqual(data[\"actor\"], 2)\n\n def test_add_actor_401(self):\n # add new actor: failure ,without authorization header\n res = self.client().post(\"/actors\", json=new_actor)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 401)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"Authorization header is expected.\")\n\n def test_add_actor_400(self):\n # add new actor: failure , without 'name' in json\n this_actor = {\"age\": 45}\n res = self.client().post(\n \"/actors\", json=this_actor, 
headers=director_auth_header\n )\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 400)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], 'Please add \"name\" in the json')\n\n # tests for GET /actors\n def test_get_actors(self):\n # get actors at page=1\n res = self.client().get(\"/actors?page=1\", headers=assistant_auth)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data[\"success\"])\n self.assertEqual(len(data[\"actors\"]), 2)\n\n def test_get_actors_404(self):\n # get actors at page=100\n res = self.client().get(\"/actors?page=100\", headers=assistant_auth)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"OOPS! No actors willing to work\")\n\n def test_get_actors_401(self):\n # get actors: failure ,without authorization header\n res = self.client().get(\"/actors\")\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 401)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"Authorization header is expected.\")\n\n # tests for PATCH /actors\n def test_modify_actors(self):\n # update an actor , sending id and json\n this_actor = {\"name\": \"Priyanka Chopra\"}\n res = self.client().patch(\n \"/actors/1\", json=this_actor, headers=producer_auth_header\n )\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data[\"success\"])\n self.assertEqual(data[\"actor\"], 1)\n\n def test_modify_actors_400(self):\n # update an actor , not sending json\n res = self.client().patch(\"/actors/1\", headers=producer_auth_header)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 400)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"there is no json body\")\n\n def test_modify_actors_403(self):\n # update an actor ,sending assistant header(doesn't contain required permissions)\n this_actor = {\"name\": \"Priyanka Chopra\"}\n res = self.client().patch(\"/actors/1\", json=this_actor, headers=assistant_auth)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 403)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"Permission not found.\")\n\n # tests for DELETE /actors\n def test_delete_actor_404(self):\n # delete an actor , failure scenario: incorrect actor_id\n res = self.client().delete(\"/actors/10\", headers=producer_auth_header)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"No actor with this id\")\n\n def test_delete_actor_401(self):\n # delete an actor , without headers\n res = self.client().delete(\"/actors/1\")\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 401)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"Authorization header is expected.\")\n\n def test_delete_actor(self):\n # delete an actor:success scenario\n res = self.client().delete(\"/actors/1\", headers=producer_auth_header)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data[\"success\"])\n self.assertEqual(data[\"actor\"], 1)\n\n # tests for POST /movies\n\n def test_add_movie(self):\n # add new movies: success scenario\n res = self.client().post(\n \"/movies\", json=new_movie, headers=producer_auth_header\n )\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n 
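        # Editor's note: the hard-coded ids asserted in this class (1 and 2)
        # appear to rely on db_insert_records() seeding exactly one row per
        # table and on unittest running test methods in alphabetical order,
        # so the POST tests run before the PATCH/DELETE tests. This is an
        # observation about the existing tests, not a change to them.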
self.assertTrue(data[\"success\"])\n self.assertEqual(data[\"movie\"], 2)\n\n def test_add_movie_401(self):\n # add new movie: failure ,without authorization header\n res = self.client().post(\"/movies\", json=new_movie)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 401)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"Authorization header is expected.\")\n\n def test_add_movie_400(self):\n # add new actor: failure , without json\n res = self.client().post(\"/movies\", headers=producer_auth_header)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 400)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"there is no json body\")\n\n # tests for GET /movies\n def test_get_movies(self):\n # get movies at page=1\n res = self.client().get(\"/movies?page=1\", headers=assistant_auth)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data[\"success\"])\n self.assertEqual(len(data[\"movies\"]), 2)\n\n def test_get_movies_404(self):\n # get movies at page=100\n res = self.client().get(\"/movies?page=100\", headers=assistant_auth)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"OOPS! No one is making movies\")\n\n def test_get_movies_401(self):\n # get movies: failure ,without authorization header\n res = self.client().get(\"/movies\")\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 401)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"Authorization header is expected.\")\n\n # tests for PATCH /movies\n def test_modify_movies(self):\n # update an movie , sending id and json\n this_movie = {\"title\": \"mahabharta\"}\n res = self.client().patch(\n \"/movies/1\", json=this_movie, headers=producer_auth_header\n )\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data[\"success\"])\n self.assertEqual(data[\"movie\"], 1)\n\n def test_modify_movies_400(self):\n # update an movie , not sending json\n res = self.client().patch(\"/movies/1\", headers=producer_auth_header)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 400)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"there is no json body\")\n\n def test_modify_movies_403(self):\n # update an movie , sending assistant header\n this_movie = {\"title\": \"mahabharta\"}\n res = self.client().patch(\"/movies/1\", json=this_movie, headers=assistant_auth)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 403)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"Permission not found.\")\n\n # tests for DELETE /movies\n def test_delete_movie_404(self):\n # delete an movie , failure scenario: incorrect movie_id\n res = self.client().delete(\"/movies/10\", headers=producer_auth_header)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"No movie with this id\")\n\n def test_delete_movie_403(self):\n # delete an actor , failure senario, sending director header\n res = self.client().delete(\"/movies/1\", headers=director_auth_header)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 403)\n self.assertFalse(data[\"success\"])\n self.assertEqual(data[\"message\"], \"Permission not found.\")\n\n def test_delete_movie(self):\n # delete an 
movie:success scenario\n res = self.client().delete(\"/movies/1\", headers=producer_auth_header)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data[\"success\"])\n self.assertEqual(data[\"movie\"], 1)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" }, { "alpha_fraction": 0.5277588963508606, "alphanum_fraction": 0.5395683646202087, "avg_line_length": 29.065305709838867, "blob_id": "ddbb2e52aa8377a57def7ee84fddfa9a50ee137b", "content_id": "82c0d3b1a1af8382367fb079580064015e3c8d67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7367, "license_type": "no_license", "max_line_length": 88, "num_lines": 245, "path": "/trivia_api/starter/backend/flaskr/__init__.py", "repo_name": "JaishreeJanu/Udacity-full-stack-developer-program", "src_encoding": "UTF-8", "text": "import os\nfrom flask import Flask, request, abort, jsonify, redirect, url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nimport random\nfrom models import setup_db, Question, Category\n\nQUESTIONS_PER_PAGE = 10\n\n\ndef paginate_questions(request, results):\n \"\"\"Groups results in group of 10 on each page\n Arguments:\n request {int} -- page number\n results {[type]} -- list\n \n Returns:\n list -- list of questions on current page\n \"\"\"\n page = request.args.get(\"page\", 1, type=int)\n start = (page - 1) * QUESTIONS_PER_PAGE\n end = start + QUESTIONS_PER_PAGE\n\n questions = [result.format() for result in results]\n current_questions = questions[start:end]\n return current_questions\n\n\ndef create_app(test_config=None):\n \"\"\"create and configure the app\n Keyword Arguments:\n test_config --(default: {None})\n Returns:\n [type] -- flask app\n \"\"\"\n app = Flask(__name__)\n setup_db(app)\n CORS(app, resources={\"r/*\": {\"origins\": \"*\"}})\n\n @app.after_request\n def after_request(response):\n \"\"\"\n set Access-Control-Allow\n \"\"\"\n response.headers.add(\n \"Access-Control-Allow-Headers\", \"Content-Type,Authorization,true\"\n )\n response.headers.add(\n \"Access-Control-Allow-Methods\", \"GET,POST,DELETE,PUT,OPTIONS\"\n )\n return response\n\n @app.route(\"/categories\")\n def list_categories():\n \"\"\" handle GET requests for all\n available categories\n Returns:\n dictionary: categories\n \"\"\"\n categories = Category.query.all()\n categories = {category.id: category.type for category in categories}\n\n return (jsonify({\"success\": True, \"categories\": categories}), 200)\n\n @app.route(\"/questions\")\n def list_questions():\n \"\"\" handle GET requests for all\n available questions on current page\n Returns:\n json: questions, categories, number of questions\n \"\"\"\n questions = Question.query.all()\n current_questions = paginate_questions(request, questions)\n categories = Category.query.all()\n all_categories = {category.id: category.type for category in categories}\n\n if len(current_questions) == 0:\n abort(404)\n\n return (\n jsonify(\n {\n \"success\": True,\n \"questions\": current_questions,\n \"total_questions\": len(current_questions),\n \"categories\": all_categories,\n }\n ),\n 200,\n )\n\n @app.route(\"/questions/<int:question_id>\", methods=[\"DELETE\"])\n def delete_question(question_id):\n \"\"\" delete the record from Question model with\n question_id\n Arguments:\n question_id {int} : question id\n Returns:\n json: success value\n \"\"\"\n question = Question.query.get(question_id)\n if question is None:\n abort(404)\n question.delete()\n\n return 
(jsonify({\"success\": True}), 200)\n\n @app.route(\"/questions\", methods=[\"POST\"])\n def create_question():\n \"\"\" post a new question\n Returns:\n json: success value and new question\n \"\"\"\n body = request.get_json()\n\n new_question = body.get(\"question\")\n new_answer = body.get(\"answer\")\n new_category = body.get(\"category\")\n new_difficulty = body.get(\"difficulty\")\n\n try:\n if not (\n (new_question) and (new_answer) and (new_category) and (new_difficulty)\n ):\n abort(400)\n insert_question = Question(\n question=new_question,\n answer=new_answer,\n category=new_category,\n difficulty=new_difficulty,\n )\n insert_question.insert()\n\n return jsonify({\"success\": True, \"question\": insert_question.format()}), 200\n except:\n abort(422)\n\n @app.route(\"/search\", methods=[\"POST\"])\n def search_term():\n \"\"\" get questions based on search term\n Returns:\n json: questions and number of questions\n \"\"\"\n body = request.get_json()\n searchTerm = body.get(\"searchTerm\")\n\n try:\n search_results = Question.query.filter(\n Question.question.ilike(\"%\" + searchTerm + \"%\")\n ).all()\n current_questions = paginate_questions(request, search_results)\n num_of_questions = len(current_questions)\n\n if num_of_questions == 0:\n abort(404)\n\n return (\n jsonify(\n {\n \"success\": True,\n \"questions\": current_questions,\n \"totalQuestions\": len(current_questions),\n \"currentCategory\": None,\n }\n ),\n 200,\n )\n except:\n abort(422)\n\n @app.route(\"/categories/<int:category_id>/questions\", methods=[\"GET\"])\n def category_questions(category_id):\n \"\"\" get questions of the given category\n category_id\n Arguments:\n category_id {int} : category id\n Returns:\n json: questions, number of questions\n \"\"\"\n this_questions = Question.query.filter_by(category=category_id).all()\n questions = paginate_questions(request, this_questions)\n\n if len(this_questions) == 0:\n abort(404)\n\n return (\n jsonify(\n {\n \"success\": True,\n \"questions\": questions,\n \"total_questions\": len(this_questions),\n \"current_category\": category_id,\n }\n ),\n 200,\n )\n\n @app.route(\"/quizzes\", methods=[\"POST\"])\n def play_quiz():\n \"\"\" get questions to play quiz\n Arguments:\n json body : previuos question list, category_id\n Returns:\n json: questions\n \"\"\"\n body = request.get_json()\n previous_questions = body.get(\"previous_questions\")\n quiz_category = body.get(\"quiz_category\")\n quiz_category_id = int(quiz_category[\"id\"])\n\n question = Question.query.filter(Question.id.notin_(previous_questions))\n\n if quiz_category_id:\n question = question.filter_by(category=quiz_category_id)\n\n question = question.first().format()\n\n return jsonify({\"success\": True, \"question\": question}), 200\n\n @app.errorhandler(404)\n def not_found(error):\n return (\n jsonify({\"success\": False, \"error\": 404, \"message\": \"resource not found\"}),\n 404,\n )\n\n @app.errorhandler(422)\n def unprocessable(error):\n return (\n jsonify({\"success\": False, \"message\": \"unprocessable\", \"error\": 422}),\n 422,\n )\n\n @app.errorhandler(400)\n def bad_request(error):\n return jsonify({\"success\": False, \"message\": \"bad request\", \"error\": 400}), 400\n\n @app.errorhandler(405)\n def method_not_allowed(error):\n return (\n jsonify({\"success\": False, \"message\": \"method not allowed\", \"error\": 405}),\n 405,\n )\n\n return app\n\n" }, { "alpha_fraction": 0.5758099555969238, "alphanum_fraction": 0.5828653573989868, "avg_line_length": 26.446640014648438, 
"blob_id": "e6b1814be1468ce7a9accc7bb9a14e1c5f6bb308", "content_id": "facb7322b4e64a5484f668dd94e30380109e105a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6945, "license_type": "no_license", "max_line_length": 87, "num_lines": 253, "path": "/fyyur-music-venue-booking-site/models.py", "repo_name": "JaishreeJanu/Udacity-full-stack-developer-program", "src_encoding": "UTF-8", "text": "import os\nfrom sqlalchemy import Column, String, Integer, DateTime, ForeignKey, Boolean, ARRAY\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nimport json\n\n\ndb = SQLAlchemy()\n\n\ndef setup_db(app):\n \"\"\"\n binds a flask application and a SQLAlchemy service\n \"\"\"\n app.config.from_object(\"config\")\n db = SQLAlchemy(app)\n migrate = Migrate(app, db)\n db.app = app\n db.init_app(app)\n return db\n\n \"\"\"\n drop_all()\n create_all()\n The above two lines drop the database tables and start fresh,\n can be used to initialize a clean database\n ** No need of above to lines, if flask-migrate is used.\n flask_migrate creates versions and automatically creates and updates table schemas.\n \"\"\"\n\n\n# All models, their relationships and properties\n\n\"\"\"\nAn entity to create instances of venues \nwhere artists perform.\nExtends the base SQLAlchemy model\n\"\"\"\n\n\nclass Venue(db.Model):\n __tablename__ = \"venue\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String, nullable=False)\n city = Column(String(120), nullable=False)\n state = Column(String(120), nullable=False)\n address = Column(String(120), nullable=False)\n phone = Column(String(120), nullable=False)\n image_link = Column(String(500))\n facebook_link = Column(String(120))\n website_link = Column(String(500))\n seeking_talent = Column(Boolean)\n seeking_description = Column(String(100))\n genres = Column(ARRAY(String(50)), nullable=True)\n shows = db.relationship(\"Musicshows\", backref=\"venue\", lazy=\"dynamic\")\n\n def __init__(\n self,\n name,\n genres,\n address,\n city,\n state,\n phone,\n website_link,\n facebook_link,\n image_link,\n seeking_talent=False,\n seeking_description=\"\",\n ):\n self.name = name\n self.genres = genres\n self.address = address\n self.city = city\n self.state = state\n self.phone = phone\n self.website = website_link\n self.facebook_link = facebook_link\n self.seeking_talent = seeking_talent\n self.seeking_description = seeking_description\n self.image_link = image_link\n\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def short(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n }\n\n def long(self):\n print(self)\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"city\": self.city,\n \"state\": self.state,\n }\n\n def details(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"genres\": self.genres,\n \"address\": self.address,\n \"city\": self.city,\n \"state\": self.state,\n \"phone\": self.phone,\n \"website_link\": self.website_link,\n \"facebook_link\": self.facebook_link,\n \"seeking_talent\": self.seeking_talent,\n \"seeking_description\": self.seeking_description,\n \"image_link\": self.image_link,\n }\n\n\n\"\"\"\nAn entity to create instances of artists,\nthe people who perform.\nExtends the base SQLAlchemy model\n\"\"\"\n\n\nclass Artist(db.Model):\n __tablename__ = \"artist\"\n\n id = Column(Integer, primary_key=True)\n name = 
Column(String, nullable=False)\n city = Column(String(120), nullable=False)\n state = Column(String(120), nullable=False)\n phone = Column(String(120), nullable=False)\n image_link = Column(String(500))\n facebook_link = Column(String(120))\n website_link = Column(String(500))\n seeking_venue = Column(Boolean)\n seeking_description = Column(String(100))\n genres = Column(ARRAY(String(50)), nullable=True)\n shows = db.relationship(\"Musicshows\", backref=\"artist\", lazy=\"dynamic\")\n\n def __init__(\n self,\n name,\n genres,\n city,\n state,\n phone,\n image_link,\n website_link,\n facebook_link,\n seeking_venue=False,\n seeking_description=\"\",\n ):\n self.name = name\n self.genres = genres\n self.city = city\n self.state = state\n self.phone = phone\n self.website_link = website_link\n self.facebook_link = facebook_link\n self.seeking_venue = seeking_venue\n self.seeking_description = seeking_description\n self.image_link = image_link\n\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.commit()\n\n def short(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n }\n\n def details(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"genres\": self.genres,\n \"city\": self.city,\n \"state\": self.state,\n \"phone\": self.phone,\n \"website_link\": self.website_link,\n \"facebook_link\": self.facebook_link,\n \"seeking_venue\": self.seeking_venue,\n \"seeking_description\": self.seeking_description,\n \"image_link\": self.image_link,\n }\n\n\n\"\"\"\nMusicshows\nAn entity to create instances of music shows,\nnew instance of show is created when an artist books a venue.\nIt has start_time attribute along with venue_id and artist_id.\nExtends the base SQLAlchemy model\n\"\"\"\n\n\nclass Musicshows(db.Model):\n __tablename__ = \"musicshows\"\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n artist_id = Column(Integer, ForeignKey(\"artist.id\"), primary_key=True)\n venue_id = Column(Integer, ForeignKey(\"venue.id\"), primary_key=True)\n start_time = Column(DateTime(), nullable=False)\n\n def __init__(self, venue_id, artist_id, start_time):\n self.venue_id = venue_id\n self.artist_id = artist_id\n self.start_time = start_time\n\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n def details(self):\n return {\n \"venue_id\": self.venue_id,\n \"venue_name\": self.venue.name,\n \"artist_id\": self.artist_id,\n \"artist_name\": self.artist.name,\n \"artist_image_link\": self.artist.image_link,\n \"start_time\": self.start_time,\n }\n\n def artist_details(self):\n return {\n \"artist_id\": self.artist_id,\n \"artist_name\": self.artist.name,\n \"artist_image_link\": self.artist.image_link,\n \"start_time\": self.start_time.strftime(\"%m/%d/%Y, %H:%M:%S\"),\n }\n\n def venue_details(self):\n return {\n \"venue_id\": self.venue_id,\n \"venue_name\": self.venue.name,\n \"venue_image_link\": self.venue.image_link,\n \"start_time\": self.start_time.strftime(\"%m/%d/%Y, %H:%M:%S\"),\n }\n\n" }, { "alpha_fraction": 0.698952853679657, "alphanum_fraction": 0.7146596908569336, "avg_line_length": 33.818180084228516, "blob_id": "40ae3079404e2bea09c8686100124a375ad320e8", "content_id": "2a966a8813238ba62e0f206d86307470d0dcb8a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 382, "license_type": "no_license", "max_line_length": 107, "num_lines": 11, "path": "/fyyur-music-venue-booking-site/config.py", "repo_name": 
"JaishreeJanu/Udacity-full-stack-developer-program", "src_encoding": "UTF-8", "text": "import os\nSECRET_KEY = os.urandom(32)\n# Grabs the folder where the script runs.\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n# Enable debug mode.\nDEBUG = True\n\n# Connect to a local postgresql database server: DATABASE URL -->\nSQLALCHEMY_DATABASE_URI = \"postgres://{}:{}@{}/{}\".format('jaishree','password','localhost:5432','fyyur')\nSQLALCHEMY_TRACK_MODIFICATIONS = False" }, { "alpha_fraction": 0.7771052718162537, "alphanum_fraction": 0.7821052670478821, "avg_line_length": 46.474998474121094, "blob_id": "99caa8e6a294bdf845ef7155c93f65be697a498d", "content_id": "defd1df6008bf31754d4a272710c8feddf11dcc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3800, "license_type": "no_license", "max_line_length": 205, "num_lines": 80, "path": "/README.md", "repo_name": "JaishreeJanu/Udacity-full-stack-developer-program", "src_encoding": "UTF-8", "text": "# Full-Stack Development projects\n\nSkills and topics:\n1. SQL and Data Modeling for the web\n2. API Development and Documentation\n3. Identity and Access Management\n4. Server Deployment, Containerization and Testing\n\nAll the above mentioned skills are helpful in developing a full-fledged web application.\n\nAs a result, I have acquired huge amount of knowledge in following areas:\n\n- Databases: **Postgresql**, **SQLAlchemy ORM**\n- API development: **Flask**, **Python3** (server language and server framework)\n- Schema migrations: **Flask-Migrate**\n- To handle cross origin requests: **Flask-CORS**\n- Third-party authentication: **Auth0**\n- Authentication and Authorization\n- Javascript object signing and encryption of JWTs: **jose**\n- TDD and writing tests: **unittest**\n- Containerization and deployment: **docker** and **kubernetes**\n- Documenting API endpoints\n\n## Project details\n\nThe projects in this repository\n\n1. **Fyyur: Artist Booking Site**\n\n- Implement data models in relational, normalized form. Relationship between models correctly defined.\n- Connect models to database.\n- Demonstrate a good grasp of SQLAlchemy.\n- Demonstrate the ability to construct a well-organized code base.\n- Following api endpoints should work : list venues, list artists, show venue details, show artist details, post a new venue,\npost a new artist(cannot submit invalid form submission), create a show, list all upcoming shows, list all past shows, search venues, search artists.\n- **Frontend**: **HTML, CSS, Bootstrap**\n\nInstallation, development setup, documentation and source code can seen here: [project link](https://github.com/JaishreeJanu/Udacity-full-stack-developer-program/tree/master/fyyur-music-venue-booking-site)\n\n2. **trivia-api**\n\n- Handling HTTP request.\n- Writing API endpoints and interacting with database.\n- Utilize multiple HTTP request methods.\n- Handle common errors.\n- Use unittest to test flask application for expected behaviour and validate API endpoints\n- **Frontend**: **Reactjs**\nDevelopment setup, API documentation and code can seen here: [project link](https://github.com/JaishreeJanu/Udacity-full-stack-developer-program/tree/master/trivia_api/starter)\n\n3. 
**Coffee shop app**\n\n- The project demonstrates an understanding of RESTful APIs\n- Secure a REST API for applications\n- Understanding third-party authentication systems\n- Understanding JWTs and Role based Authentication\n- The code adheres to the [PEP 8 style guide](https://www.python.org/dev/peps/pep-0008/) and follows common best practices.\n- **Frontend**: **HTML, CSS, Bootstrap**\n\nDevelopment setup, API documentation and code can be seen here: [project link](https://github.com/JaishreeJanu/Udacity-full-stack-developer-program/tree/master/coffee_shop/starter_code)\n\n4. **Deploying a Flask app on Kubernetes**\n\n- Containerize and run the app locally.\n- Create an EKS cluster and IAM role.\n- Deployment to Kubernetes using CodePipeline and CodeBuild.\n- Adding tests to the build.\n\nSource code for this project can be seen here: [project link](https://github.com/JaishreeJanu/deployment-on-kubernetes)\n\n5. **Capstone project: Casting Agency**\n\n- Architect relational database models in Python.\n- Utilize SQLAlchemy to conduct database queries.\n- Follow RESTful principles of API development.\n- Enable Role Based Authentication and roles-based access control (RBAC) in a Flask application.\n- Demonstrate validity of API behavior.\n- The application is hosted live on Heroku: [live application](https://final-capstone-udacity.herokuapp.com/actors) \n- The application has been documented well. The code adheres to the PEP 8 style guide.\n\nDevelopment setup, documentation and source code of this application can be seen here: [project link](https://github.com/JaishreeJanu/Udacity-full-stack-developer-program/tree/master/capstone%20project)\n\n\n" }, { "alpha_fraction": 0.6417251229286194, "alphanum_fraction": 0.6562743782997131, "avg_line_length": 28.16666603088379, "blob_id": "f8c05101df4ece8e48c900f36b7f65bdb0a3bd2d", "content_id": "e256593da71af67d7af658bb0e75a519d8a9b17c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3849, "license_type": "no_license", "max_line_length": 83, "num_lines": 132, "path": "/coffee_shop/starter_code/backend/src/api.py", "repo_name": "JaishreeJanu/Udacity-full-stack-developer-program", "src_encoding": "UTF-8", "text": "import os\nfrom flask import Flask, request, jsonify, abort\nfrom sqlalchemy import exc\nimport json\nfrom flask_cors import CORS\n\nfrom .database.models import db_drop_and_create_all, setup_db, Drink\nfrom .auth.auth import AuthError, requires_auth\n\napp = Flask(__name__)\nsetup_db(app)\nCORS(app)\ndb_drop_and_create_all()\n\n## ROUTES\n\n\[email protected](\"/drinks\", methods=[\"GET\"])\ndef get_drinks_menu():\n \"\"\"should get only a brief description of each drink\n Returns:\n status code 200 and json {\"success\": True, \"drinks\": list_of_drinks}\n \"\"\"\n drink_results = Drink.query.all()\n drinks = [Drink.short(drink) for drink in drink_results]\n\n return jsonify({\"success\": True, \"drinks\": drinks}), 200\n\n\[email protected](\"/drinks-detail\")\n@requires_auth(\"get:drinks-detail\")\ndef get_drinks_detail(token):\n \"\"\"it should require the 'get:drinks-detail' permission\n it should contain the drink.long() data representation\n Returns:\n status code 200 and json {\"success\": True, \"drinks\": list_of_drinks}\n \"\"\"\n drink_results = Drink.query.all()\n drinks = [Drink.long(drink) for drink in drink_results]\n return jsonify({\"success\": True, \"drinks\": drinks}), 200\n\[email protected](\"/drinks\", methods=[\"POST\"])\n@requires_auth(\"post:drinks\")\ndef create_drink(token):\n
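    # Editor's note (assumption, not the repo's code): the handler below indexes
    # body["title"] and body["recipe"] directly, so a POST without those keys
    # raises KeyError and surfaces as a 500 instead of a 400. A defensive
    # variant could be:
    #     if not body or "title" not in body or "recipe" not in body:
    #         abort(400)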
\"\"\" it should create a new row in the drinks table\n it should require the 'post:drinks' permission\n it should contain the drink.long() data representation\n Returns:\n status code 200 and json {\"success\": True, \"drinks\": new_drink}\n \"\"\"\n body = json.loads(request.data.decode(\"utf-8\"))\n new_drink = Drink(title=body[\"title\"], recipe=json.dumps(body[\"recipe\"]))\n Drink.insert(new_drink)\n\n return jsonify({\"success\": True, \"drinks\": Drink.long(new_drink)}), 200\n\n\[email protected](\"/drinks/<int:drink_id>\", methods=[\"PATCH\"])\n@requires_auth(\"patch:drinks\")\ndef update_drink(token, drink_id):\n \"\"\"\n it should respond with a 404 error if <id> is not found\n it should update the corresponding row for <id>\n it should require the 'patch:drinks' permission\n it should contain the drink.long() data representation\n Arguments:\n drink_id {int}: drink id\n Returns:\n status code 200 and json {\"success\": True, \"drinks\": updated_drink}\n \"\"\"\n data = json.loads(request.data.decode(\"utf-8\"))\n this_drink = Drink.query.get(drink_id)\n if not this_drink:\n abort(404)\n\n if \"title\" in data:\n this_drink.title = data[\"title\"]\n\n if \"recipe\" in data:\n this_drink.recipe = data[\"recipe\"]\n\n Drink.update(this_drink)\n\n return jsonify({\"success\": True, \"drinks\": Drink.long(this_drink)}), 200\n\n\[email protected](\"/drinks/<int:drink_id>\", methods=[\"DELETE\"])\n@requires_auth(\"delete:drinks\")\ndef delete_drink(token, drink_id):\n \"\"\"it should respond with a 404 error if <id> is not found\n it should delete the corresponding row for <id>\n it should require the 'delete:drinks' permission\n \n Arguments:\n token {string} -- token\n drink_id {[=int} -- drink id\n \n Returns:\n json -- {\"success\": True, \"delete\": id}\n \"\"\"\n drink = Drink.query.get(drink_id)\n Drink.delete(drink)\n\n return jsonify({\"success\": True, \"delete\": drink_id}), 200\n\n\n## Error Handling\n\n\[email protected](422)\ndef unprocessable(error):\n return (\n jsonify({\"success\": False, \"error\": 422, \"message\": \"unprocessable\"}),\n 422,\n )\[email protected](404)\ndef resouce_not_found(error):\n return (\n jsonify({\"success\": False, \"error\": 404, \"message\": \"resource not found\"}),\n 404,\n )\n\n\n\"\"\"implement error handler for AuthError\nerror handler should conform to general task above \n\"\"\"\n\[email protected](AuthError)\ndef authentification_failed(error):\n return jsonify(\n {\"success\": False, \"error\": error.status_code, \"message\": error.error}\n )" } ]
12
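The trivia backend in the record above slices results ten per page in paginate_questions. The same arithmetic as a self-contained sketch (the sample data is hypothetical, standing in for Question.query.all()):

```python
# Standalone sketch of the page-slicing used by paginate_questions above.
QUESTIONS_PER_PAGE = 10

def paginate(items, page):
    # page is 1-based, exactly as in the Flask helper
    start = (page - 1) * QUESTIONS_PER_PAGE
    return items[start:start + QUESTIONS_PER_PAGE]

rows = [f"question {i}" for i in range(1, 26)]  # 25 hypothetical rows
assert len(paginate(rows, 1)) == 10  # first full page
assert len(paginate(rows, 3)) == 5   # last, partial page
assert paginate(rows, 4) == []       # past the end; the API maps this to a 404
```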
linuxer9/device-driver-manager
https://github.com/linuxer9/device-driver-manager
68cda7f1089cc33080e76ceb3026ecf8634dfb03
90e28905e3a03b827b6d648f8fe726389b6f1fbc
edb7ae48eb07bdd6f43bebb2b19a3239b823a67a
refs/heads/master
2021-01-16T20:01:38.736549
2013-01-13T10:29:45
2013-01-13T10:29:45
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.32132065296173096, "alphanum_fraction": 0.5189823508262634, "avg_line_length": 31.1647891998291, "blob_id": "5cf1d95309f35f7aa7f1c2b001ca6865b1abb4d3", "content_id": "0328619b71871398cc474e2f1e7be06718aa3690", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22837, "license_type": "no_license", "max_line_length": 86, "num_lines": 710, "path": "/usr/lib/device-driver-manager/nvidia_gpus.py", "repo_name": "linuxer9/device-driver-manager", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n# Lists with supported Nvidia GPU's per driver series\n# http://us.download.nvidia.com/XFree86/Linux-x86_64/304.64/README/supportedchips.html\n\n# Driver packages in Ubuntu\n# 304 = nvidia-current\n# 173 = nvidia-173\n# 96 = nvidai-96\n# 71 = not supported\n\n\ndef checkNvidiaID(pciId):\n foundGpu = []\n pciId = pciId.upper()\n gpus = getNvidia_304()\n for gpu in gpus:\n if pciId in gpu[1]:\n foundGpu.append(304)\n foundGpu.append('Nvidia ' + gpu[0])\n foundGpu.append(pciId)\n break\n if not foundGpu:\n gpus = getNvidia_173()\n for gpu in gpus:\n if pciId in gpu[1]:\n foundGpu.append(173)\n foundGpu.append('Nvidia ' + gpu[0])\n foundGpu.append(pciId)\n break\n if not foundGpu:\n gpus = getNvidia_96()\n for gpu in gpus:\n if pciId in gpu[1]:\n foundGpu.append(96)\n foundGpu.append('Nvidia ' + gpu[0])\n foundGpu.append(pciId)\n break\n if not foundGpu:\n gpus = getNvidia_71()\n for gpu in gpus:\n if pciId in gpu[1]:\n foundGpu.append(71)\n foundGpu.append('Nvidia ' + gpu[0])\n foundGpu.append(pciId)\n break\n return foundGpu\n\n\ndef getNvidia_304():\n lst = [\n ['GeForce 6800 Ultra', '0040'],\n ['GeForce 6800', '0041'],\n ['GeForce 6800 LE', '0042'],\n ['GeForce 6800 XE', '0043'],\n ['GeForce 6800 XT', '0044'],\n ['GeForce 6800 GT', '0045'],\n ['GeForce 6800 GT', '0046'],\n ['GeForce 6800 GS', '0047'],\n ['GeForce 6800 XT', '0048'],\n ['GeForce 7800 GTX', '0090'],\n ['GeForce 7800 GTX', '0091'],\n ['GeForce 7800 GT', '0092'],\n ['GeForce 7800 GS', '0093'],\n ['GeForce 7800 SLI', '0095'],\n ['GeForce Go 7800', '0098'],\n ['GeForce Go 7800 GTX', '0099'],\n ['GeForce 6800 GS', '00C0'],\n ['GeForce 6800', '00C1'],\n ['GeForce 6800 LE', '00C2'],\n ['GeForce 6800 XT', '00C3'],\n ['GeForce Go 6800', '00C8'],\n ['GeForce Go 6800 Ultra', '00C9'],\n ['GeForce 6600 GT', '00F1'],\n ['GeForce 6600', '00F2'],\n ['GeForce 6200', '00F3'],\n ['GeForce 6600 LE', '00F4'],\n ['GeForce 7800 GS', '00F5'],\n ['GeForce 6800 GS', '00F6'],\n ['GeForce 6800 Ultra', '00F9'],\n ['GeForce 6600 GT', '0140'],\n ['GeForce 6600', '0141'],\n ['GeForce 6600 LE', '0142'],\n ['GeForce 6600 VE', '0143'],\n ['GeForce Go 6600', '0144'],\n ['GeForce 6610 XL', '0145'],\n ['GeForce Go 6600 TE/6200 TE', '0146'],\n ['GeForce 6700 XL', '0147'],\n ['GeForce Go 6600', '0148'],\n ['GeForce Go 6600 GT', '0149'],\n ['GeForce 6200', '014F'],\n ['GeForce 6500', '0160'],\n ['GeForce 6200 TurboCache(TM)', '0161'],\n ['GeForce 6200SE TurboCache(TM)', '0162'],\n ['GeForce 6200 LE', '0163'],\n ['GeForce Go 6200', '0164'],\n ['GeForce Go 6400', '0166'],\n ['GeForce Go 6200', '0167'],\n ['GeForce Go 6400', '0168'],\n ['GeForce 6250', '0169'],\n ['GeForce 7100 GS', '016A'],\n ['GeForce 8800 GTX', '0191'],\n ['GeForce 8800 GTS', '0193'],\n ['GeForce 8800 Ultra', '0194'],\n ['GeForce 7350 LE', '01D0'],\n ['GeForce 7300 LE', '01D1'],\n ['GeForce 7550 LE', '01D2'],\n ['GeForce 7300 SE/7200 GS', '01D3'],\n ['GeForce Go 7200', '01D6'],\n ['GeForce Go 7300', '01D7'],\n ['GeForce Go 7400', 
'01D8'],\n ['GeForce 7500 LE', '01DD'],\n ['GeForce 7300 GS', '01DF'],\n ['GeForce 6200', '0221'],\n ['GeForce 6200 A-LE', '0222'],\n ['GeForce 6150', '0240'],\n ['GeForce 6150 LE', '0241'],\n ['GeForce 6100', '0242'],\n ['GeForce Go 6150', '0244'],\n ['GeForce Go 6100', '0247'],\n ['GeForce 7900 GTX', '0290'],\n ['GeForce 7900 GT/GTO', '0291'],\n ['GeForce 7900 GS', '0292'],\n ['GeForce 7950 GX2', '0293'],\n ['GeForce 7950 GX2', '0294'],\n ['GeForce 7950 GT', '0295'],\n ['GeForce Go 7950 GTX', '0297'],\n ['GeForce Go 7900 GS', '0298'],\n ['GeForce 7600 GT', '02E0'],\n ['GeForce 7600 GS', '02E1'],\n ['GeForce 7300 GT', '02E2'],\n ['GeForce 7900 GS', '02E3'],\n ['GeForce 7950 GT', '02E4'],\n ['GeForce 7650 GS', '038B'],\n ['GeForce 7650 GS', '0390'],\n ['GeForce 7600 GT', '0391'],\n ['GeForce 7600 GS', '0392'],\n ['GeForce 7300 GT', '0393'],\n ['GeForce 7600 LE', '0394'],\n ['GeForce 7300 GT', '0395'],\n ['GeForce Go 7700', '0397'],\n ['GeForce Go 7600', '0398'],\n ['GeForce Go 7600 GT', '0399'],\n ['GeForce 6150SE nForce 430', '03D0'],\n ['GeForce 6100 nForce 405', '03D1'],\n ['GeForce 6100 nForce 400', '03D2'],\n ['GeForce 6100 nForce 420', '03D5'],\n ['GeForce 7025 / nForce 630a', '03D6'],\n ['GeForce 8600 GTS', '0400'],\n ['GeForce 8600 GT', '0401'],\n ['GeForce 8600 GT', '0402'],\n ['GeForce 8600 GS', '0403'],\n ['GeForce 8400 GS', '0404'],\n ['GeForce 9500M GS', '0405'],\n ['GeForce 8300 GS', '0406'],\n ['GeForce 8600M GT', '0407'],\n ['GeForce 9650M GS', '0408'],\n ['GeForce 8700M GT', '0409'],\n ['GeForce GT 330', '0410'],\n ['GeForce 8400 SE', '0420'],\n ['GeForce 8500 GT', '0421'],\n ['GeForce 8400 GS', '0422'],\n ['GeForce 8300 GS', '0423'],\n ['GeForce 8400 GS', '0424'],\n ['GeForce 8600M GS', '0425'],\n ['GeForce 8400M GT', '0426'],\n ['GeForce 8400M GS', '0427'],\n ['GeForce 8400M G', '0428'],\n ['GeForce 9400 GT', '042C'],\n ['GeForce 9300M G', '042E'],\n ['GeForce 7150M / nForce 630M', '0531'],\n ['GeForce 7000M / nForce 610M', '0533'],\n ['GeForce 7050 PV / nForce 630a', '053A'],\n ['GeForce 7050 PV / nForce 630a', '053B'],\n ['GeForce 7025 / nForce 630a', '053E'],\n ['GeForce GTX 295', '05E0'],\n ['GeForce GTX 280', '05E1'],\n ['GeForce GTX 260', '05E2'],\n ['GeForce GTX 285', '05E3'],\n ['GeForce GTX 275', '05E6'],\n ['GeForce GTX 260', '05EA'],\n ['GeForce GTX 295', '05EB'],\n ['GeForce 8800 GTS 512', '0600'],\n ['GeForce 9800 GT', '0601'],\n ['GeForce 8800 GT', '0602'],\n ['GeForce GT 230', '0603'],\n ['GeForce 9800 GX2', '0604'],\n ['GeForce 9800 GT', '0605'],\n ['GeForce 8800 GS', '0606'],\n ['GeForce GTS 240', '0607'],\n ['GeForce 9800M GTX', '0608'],\n ['GeForce 8800M GTS', '0609'],\n ['GeForce GTX 280M', '060A'],\n ['GeForce 9800M GT', '060B'],\n ['GeForce 8800M GTX', '060C'],\n ['GeForce 8800 GS', '060D'],\n ['GeForce GTX 285M', '060F'],\n ['GeForce 9600 GSO', '0610'],\n ['GeForce 8800 GT', '0611'],\n ['GeForce 9800 GTX/9800 GTX+', '0612'],\n ['GeForce 9800 GTX+', '0613'],\n ['GeForce 9800 GT', '0614'],\n ['GeForce GTS 250', '0615'],\n ['GeForce 9800M GTX', '0617'],\n ['GeForce GTX 260M', '0618'],\n ['GeForce GT 230', '0621'],\n ['GeForce 9600 GT', '0622'],\n ['GeForce 9600 GS', '0623'],\n ['GeForce 9600 GSO 512', '0625'],\n ['GeForce GT 130', '0626'],\n ['GeForce GT 140', '0627'],\n ['GeForce 9800M GTS', '0628'],\n ['GeForce 9700M GTS', '062A'],\n ['GeForce 9800M GS', '062B'],\n ['GeForce 9800M GTS', '062C'],\n ['GeForce 9600 GT', '062D'],\n ['GeForce 9600 GT', '062E'],\n ['GeForce 9700 S', '0630'],\n ['GeForce GTS 160M', '0631'],\n ['GeForce GTS 150M', 
'0632'],\n ['GeForce 9600 GSO', '0635'],\n ['GeForce 9600 GT', '0637'],\n ['GeForce 9500 GT', '0640'],\n ['GeForce 9400 GT', '0641'],\n ['GeForce 9500 GT', '0643'],\n ['GeForce 9500 GS', '0644'],\n ['GeForce 9500 GS', '0645'],\n ['GeForce GT 120', '0646'],\n ['GeForce 9600M GT', '0647'],\n ['GeForce 9600M GS', '0648'],\n ['GeForce 9600M GT', '0649'],\n ['GeForce 9700M GT', '064A'],\n ['GeForce 9500M G', '064B'],\n ['GeForce 9650M GT', '064C'],\n ['GeForce G 110M', '0651'],\n ['GeForce GT 130M', '0652'],\n ['GeForce GT 120M', '0653'],\n ['GeForce GT 220M', '0654'],\n ['GeForce GT 120', '0655 0633'],\n ['GeForce 9650 S', '0656'],\n ['GeForce 9400 GT', '065B'],\n ['GeForce GTX 480', '06C0'],\n ['GeForce GTX 465', '06C4'],\n ['GeForce GTX 480M', '06CA'],\n ['GeForce GTX 470', '06CD'],\n ['GeForce 9300 GE', '06E0'],\n ['GeForce 9300 GS', '06E1'],\n ['GeForce 8400', '06E2'],\n ['GeForce 8400 SE', '06E3'],\n ['GeForce 8400 GS', '06E4'],\n ['GeForce 9300M GS', '06E5'],\n ['GeForce G100', '06E6'],\n ['GeForce 9300 SE', '06E7'],\n ['GeForce 9200M GS', '06E8'],\n ['GeForce 9300M GS', '06E9'],\n ['GeForce G 105M', '06EC'],\n ['GeForce G 103M', '06EF'],\n ['GeForce G105M', '06F1'],\n ['GeForce 7150 / nForce 630i', '07E0'],\n ['GeForce 7100 / nForce 630i', '07E1'],\n ['GeForce 7050 / nForce 630i', '07E2'],\n ['GeForce 7050 / nForce 610i', '07E3'],\n ['GeForce 7050 / nForce 620i', '07E5'],\n ['GeForce 8200M', '0840'],\n ['GeForce 9100M G', '0844'],\n ['GeForce 8200M G', '0845'],\n ['GeForce 9200', '0846'],\n ['GeForce 9100', '0847'],\n ['GeForce 8300', '0848'],\n ['GeForce 8200', '0849'],\n ['nForce 730a', '084A'],\n ['GeForce 9200', '084B'],\n ['nForce 980a/780a SLI', '084C'],\n ['nForce 750a SLI', '084D'],\n ['GeForce 8100 / nForce 720a', '084F'],\n ['GeForce 9400', '0860'],\n ['GeForce 9400', '0861'],\n ['GeForce 9400M G', '0862'],\n ['GeForce 9400M', '0863'],\n ['GeForce 9300', '0864'],\n ['ION', '0865'],\n ['GeForce 9400M G', '0866'],\n ['GeForce 9400', '0867'],\n ['nForce 760i SLI', '0868'],\n ['GeForce 9400', '0869'],\n ['GeForce 9400', '086A'],\n ['GeForce 9300 / nForce 730i', '086C'],\n ['GeForce 9200', '086D'],\n ['GeForce 9100M G', '086E'],\n ['GeForce 8200M G', '086F'],\n ['GeForce 9400M', '0870'],\n ['GeForce 9200', '0871'],\n ['GeForce G102M', '0872'],\n ['GeForce G102M', '0873'],\n ['ION', '0874'],\n ['ION', '0876'],\n ['GeForce 9400', '087A'],\n ['ION', '087D'],\n ['ION LE', '087E'],\n ['ION LE', '087F'],\n ['GeForce 320M', '08A0'],\n ['GeForce 320M', '08A2'],\n ['GeForce 320M', '08A3'],\n ['GeForce 320M', '08A4'],\n ['GeForce 320M', '08A5'],\n ['GeForce GT 220', '0A20'],\n ['GeForce 315', '0A22'],\n ['GeForce 210', '0A23'],\n ['GeForce 405', '0A26'],\n ['GeForce 405', '0A27'],\n ['GeForce GT 230M', '0A28'],\n ['GeForce GT 330M', '0A29'],\n ['GeForce GT 230M', '0A2A'],\n ['GeForce GT 330M', '0A2B'],\n ['GeForce GT 320M', '0A2D'],\n ['GeForce GT 415', '0A32'],\n ['GeForce GT 240M', '0A34'],\n ['GeForce GT 325M', '0A35'],\n ['GeForce G210', '0A60'],\n ['GeForce 205', '0A62'],\n ['GeForce 310', '0A63'],\n ['Second Generation ION', '0A64'],\n ['GeForce 210', '0A65'],\n ['GeForce 310', '0A66'],\n ['GeForce 315', '0A67'],\n ['GeForce G105M', '0A68'],\n ['GeForce G105M', '0A69'],\n ['GeForce 305M', '0A6E'],\n ['Second Generation ION', '0A6F'],\n ['GeForce 310M', '0A70'],\n ['GeForce 305M', '0A71'],\n ['GeForce 310M', '0A72'],\n ['GeForce 305M', '0A73'],\n ['GeForce G210M', '0A74'],\n ['GeForce 310M', '0A75'],\n ['Second Generation ION', '0A76'],\n ['GeForce 315M', '0A7A'],\n ['GeForce GT 330', 
'0CA0'],\n ['GeForce GT 320', '0CA2'],\n ['GeForce GT 240', '0CA3'],\n ['GeForce GT 340', '0CA4'],\n ['GeForce GT 220', '0CA5'],\n ['GeForce GT 330', '0CA7'],\n ['GeForce GTS 260M', '0CA8'],\n ['GeForce GTS 250M', '0CA9'],\n ['GeForce GT 220', '0CAC'],\n ['GeForce GT 335M', '0CAF'],\n ['GeForce GTS 350M', '0CB0'],\n ['GeForce GTS 360M', '0CB1'],\n ['GeForce GT 440', '0DC0'],\n ['GeForce GTS 450', '0DC4'],\n ['GeForce GTS 450', '0DC5'],\n ['GeForce GTS 450', '0DC6'],\n ['GeForce GT 555M', '0DCD'],\n ['GeForce GT 555M', '0DCE'],\n ['GeForce GTX 460M', '0DD1'],\n ['GeForce GT 445M', '0DD2'],\n ['GeForce GT 435M', '0DD3'],\n ['GeForce GT 550M', '0DD6'],\n ['GeForce GT 440', '0DE0'],\n ['GeForce GT 430', '0DE1'],\n ['GeForce GT 420', '0DE2'],\n ['GeForce GT 635M', '0DE3'],\n ['GeForce GT 520', '0DE4'],\n ['GeForce GT 530', '0DE5'],\n ['GeForce GT 620M', '0DE8'],\n ['GeForce GT 630M', '0DE9'],\n ['GeForce 610M', '0DEA'],\n ['GeForce GT 555M', '0DEB'],\n ['GeForce GT 525M', '0DEC'],\n ['GeForce GT 520M', '0DED'],\n ['GeForce GT 415M', '0DEE'],\n ['GeForce GT 425M', '0DF0'],\n ['GeForce GT 420M', '0DF1'],\n ['GeForce GT 435M', '0DF2'],\n ['GeForce GT 420M', '0DF3'],\n ['GeForce GT 540M', '0DF4'],\n ['GeForce GT 525M', '0DF5'],\n ['GeForce GT 550M', '0DF6'],\n ['GeForce GT 520M', '0DF7'],\n ['GeForce GTX 460', '0E22'],\n ['GeForce GTX 460 SE', '0E23'],\n ['GeForce GTX 460', '0E24'],\n ['GeForce GTX 470M', '0E30'],\n ['GeForce GTX 485M', '0E31'],\n ['GeForce GT 630', '0F00'],\n ['GeForce GT 620', '0F01'],\n ['GeForce GT 640', '0FC0'],\n ['GeForce GT 640', '0FC1'],\n ['GeForce GT 630', '0FC2'],\n ['GeForce GTX 650', '0FC6'],\n ['GeForce GT 640M LE', '0FCE'],\n ['GeForce GT 650M', '0FD1'],\n ['GeForce GT 640M', '0FD2'],\n ['GeForce GT 640M LE', '0FD3'],\n ['GeForce GTX 660M', '0FD4'],\n ['GeForce GT 650M', '0FD5'],\n ['GeForce GT 640M', '0FD8'],\n ['GeForce GT 645M', '0FD9'],\n ['GeForce GTX 660M', '0FE0'],\n ['GeForce GT 520', '1040'],\n ['GeForce 510', '1042'],\n ['GeForce 605', '1048'],\n ['GeForce GT 620', '1049'],\n ['GeForce GT 610', '104A'],\n ['GeForce GT 520M', '1050'],\n ['GeForce GT 520MX', '1051'],\n ['GeForce GT 520M', '1052'],\n ['GeForce 410M', '1054'],\n ['GeForce 410M', '1055'],\n ['GeForce 610M', '1058'],\n ['GeForce 610M', '1059'],\n ['GeForce 610M', '105A'],\n ['GeForce GTX 580', '1080'],\n ['GeForce GTX 570', '1081'],\n ['GeForce GTX 560 Ti', '1082'],\n ['GeForce GTX 560', '1084'],\n ['GeForce GTX 570', '1086'],\n ['GeForce GTX 560 Ti', '1087'],\n ['GeForce GTX 590', '1088'],\n ['GeForce GTX 580', '1089'],\n ['GeForce GTX 580', '108B'],\n ['GeForce 9300 GS', '10C0'],\n ['GeForce 8400GS', '10C3'],\n ['GeForce 405', '10C5'],\n ['GeForce GT 630M', '1140 0565'],\n ['GeForce GT 630M', '1140 0568'],\n ['GeForce GT 620M', '1140 067A'],\n ['GeForce GT 620M', '1140 0680'],\n ['GeForce GT 620M', '1140 20DD'],\n ['GeForce GTX 680', '1180'],\n ['GeForce GTX 660 Ti', '1183'],\n ['GeForce GTX 660', '1185'],\n ['GeForce GTX 690', '1188'],\n ['GeForce GTX 670', '1189'],\n ['GeForce GTX 680M', '11A0'],\n ['GeForce GTX 670MX', '11A1'],\n ['GeForce GTX 675MX', '11A7'],\n ['GeForce GTX 660', '11C0'],\n ['GeForce GTX 650 Ti', '11C6'],\n ['GeForce GTX 560 Ti', '1200'],\n ['GeForce GTX 560', '1201'],\n ['GeForce GTX 460 SE v2', '1203'],\n ['GeForce GTX 460 v2', '1205'],\n ['GeForce GTX 555', '1206'],\n ['GeForce GT 645', '1207'],\n ['GeForce GTX 560 SE', '1208'],\n ['GeForce GTX 570M', '1210'],\n ['GeForce GTX 580M', '1211'],\n ['GeForce GTX 675M', '1212'],\n ['GeForce GTX 670M', '1213'],\n ['GeForce GT 
545', '1241'],\n ['GeForce GT 545', '1243'],\n ['GeForce GTX 550 Ti', '1244'],\n ['GeForce GTS 450', '1245'],\n ['GeForce GT 550M', '1246'],\n ['GeForce GT 555M', '1247'],\n ['GeForce GT 635M', '1247 212A'],\n ['GeForce GT 635M', '1247 212B'],\n ['GeForce GT 635M', '1247 212C'],\n ['GeForce GT 555M', '1248'],\n ['GeForce GTS 450', '1249'],\n ['GeForce GT 640', '124B'],\n ['GeForce GT 555M', '124D'],\n ['GeForce GT 635M', '124D 10CC'],\n ['GeForce GTX 560M', '1251'],\n ['Quadro FX 4000', '004E'],\n ['Quadro FX 4500', '009D'],\n ['Quadro FX Go1400', '00CC'],\n ['Quadro FX 3450/4000 SDI', '00CD'],\n ['Quadro FX 1400', '00CE'],\n ['Quadro FX 3400/Quadro FX 4000', '00F8'],\n ['Quadro FX 540M', '014C'],\n ['Quadro FX 550', '014D'],\n ['Quadro FX 540', '014E'],\n ['Quadro FX 5600', '019D'],\n ['Quadro FX 4600', '019E'],\n ['Quadro FX 350M', '01DC'],\n ['Quadro FX 350', '01DE'],\n ['Quadro FX 2500M', '029A'],\n ['Quadro FX 1500M', '029B'],\n ['Quadro FX 5500', '029C'],\n ['Quadro FX 3500', '029D'],\n ['Quadro FX 1500', '029E'],\n ['Quadro FX 4500 X2', '029F'],\n ['Quadro FX 560M', '039C'],\n ['Quadro FX 560', '039E'],\n ['Quadro FX 370', '040A'],\n ['Quadro FX 570M', '040C'],\n ['Quadro FX 1600M', '040D'],\n ['Quadro FX 570', '040E'],\n ['Quadro FX 1700', '040F'],\n ['Quadro FX 360M', '042D'],\n ['Quadroplex 2200 D2', '05ED'],\n ['Quadroplex 2200 S4', '05F8'],\n ['Quadro CX', '05F9'],\n ['Quadro FX 5800', '05FD'],\n ['Quadro FX 4800', '05FE'],\n ['Quadro FX 3800', '05FF'],\n ['Quadro FX 4700 X2', '0619'],\n ['Quadro FX 3700', '061A'],\n ['Quadro VX 200', '061B'],\n ['Quadro FX 3600M', '061C'],\n ['Quadro FX 2800M', '061D'],\n ['Quadro FX 3700M', '061E'],\n ['Quadro FX 3800M', '061F'],\n ['Quadro FX 1800', '0638'],\n ['Quadro FX 2700M', '063A'],\n ['Quadro FX 380', '0658'],\n ['Quadro FX 580', '0659'],\n ['Quadro FX 1700M', '065A'],\n ['Quadro FX 770M', '065C'],\n ['Quadro 6000', '06D8'],\n ['Quadro 5000', '06D9'],\n ['Quadro 5000M', '06DA'],\n ['Quadro 6000', '06DC'],\n ['Quadro 4000', '06DD'],\n ['Quadro FX 370 LP', '06F9'],\n ['Quadro FX 370M', '06FB'],\n ['HICx16 + Graphics', '06FF'],\n ['Quadro 400', '0A38'],\n ['Quadro FX 880M', '0A3C'],\n ['Quadro FX 380 LP', '0A78'],\n ['Quadro FX 380M', '0A7C'],\n ['Quadro FX 1800M', '0CBC'],\n ['Quadro 2000', '0DD8'],\n ['Quadro 2000D', '0DD8 0914'],\n ['Quadro 2000M', '0DDA'],\n ['Quadro 600', '0DF8'],\n ['Quadro 500M', '0DF9'],\n ['Quadro 1000M', '0DFA'],\n ['Quadro 3000M', '0E3A'],\n ['Quadro 4000M', '0E3B'],\n ['Quadro K2000M', '0FFB'],\n ['Quadro K1000M', '0FFC'],\n ['Quadro 410', '0FFF'],\n ['Quadro 5010M', '109A'],\n ['Quadro 7000', '109B'],\n ['Quadro K5000', '11BA'],\n ['Quadro K5000M', '11BC'],\n ['Quadro K4000M', '11BD'],\n ['Quadro K3000M', '11BE'],\n ['Quadro NVS 440', '014A'],\n ['Quadro NVS 285', '0165'],\n ['Quadro NVS 110M', '01DA'],\n ['Quadro NVS 120M', '01DB'],\n ['Quadro NVS 210S / GeForce 6150LE', '0245'],\n ['Quadro NVS 510M', '0299'],\n ['Quadro NVS 320M', '040B'],\n ['Quadro NVS 140M', '0429'],\n ['Quadro NVS 130M', '042A'],\n ['Quadro NVS 135M', '042B'],\n ['Quadro NVS 290', '042F'],\n ['Quadro NVS 150M', '06EA'],\n ['Quadro NVS 160M', '06EB'],\n ['Quadro NVS 420', '06F8'],\n ['Quadro NVS 450', '06FA'],\n ['Quadro NVS 295', '06FD'],\n ['NVS 5100M', '0A2C'],\n ['NVS 2100M', '0A6A'],\n ['NVS 3100M', '0A6C'],\n ['NVS 5400M', '0DEF'],\n ['NVS 5200M', '0DFC'],\n ['NVS 510', '0FFD'],\n ['NVS 4200M', '1056'],\n ['NVS 4200M', '1057'],\n ['NVS 310', '107D'],\n ['NVS 300', '10D8'],\n ['Tesla C870', '0197'],\n ['Tesla C1060', '05E7'],\n 
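# entries with two hex values pair the PCI device ID with a board-specific subsystem ID\n           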
['Tesla T10 Processor', '05E7 0595'],\n           ['Tesla T10 Processor', '05E7 068F'],\n           ['Tesla M1060', '05E7 0697'],\n           ['Tesla M1060', '05E7 0743'],\n           ['Tesla C2050 / C2070', '06D1'],\n           ['Tesla C2070', '06D1 0772'],\n           ['Tesla M2070', '06D2'],\n           ['Tesla T20 Processor', '06DE'],\n           ['Tesla M2050', '06DE 082F'],\n           ['Tesla M2050', '06DE 0846'],\n           ['Tesla M2070-Q', '06DF'],\n           ['Tesla K20c', '1022'],\n           ['Tesla K20m', '1028'],\n           ['Tesla M2090', '1091'],\n           ['Tesla X2090', '1091 0974'],\n           ['Tesla M2075', '1094'],\n           ['Tesla C2075', '1096'],\n           ['Tesla K10', '118F'],\n           ['VGX K1', '0FF2'],\n           ['VGX K2', '11BF']\n           ]\n    return lst\n\n\ndef getNvidia_173():\n    lst = [\n           ['GeForce PCX 5750', '00FA'],\n           ['GeForce PCX 5900', '00FB'],\n           ['Quadro FX 330/GeForce PCX 5300', '00FC'],\n           ['Quadro FX 330/Quadro NVS 280 PCI-E', '00FD'],\n           ['Quadro FX 1300', '00FE'],\n           ['GeForce FX 5800 Ultra', '0301'],\n           ['GeForce FX 5800', '0302'],\n           ['Quadro FX 2000', '0308'],\n           ['Quadro FX 1000', '0309'],\n           ['GeForce FX 5600 Ultra', '0311'],\n           ['GeForce FX 5600', '0312'],\n           ['GeForce FX 5600XT', '0314'],\n           ['GeForce FX Go5600', '031A'],\n           ['GeForce FX Go5650', '031B'],\n           ['Quadro FX Go700', '031C'],\n           ['GeForce FX 5200', '0320'],\n           ['GeForce FX 5200 Ultra', '0321'],\n           ['GeForce FX 5200', '0322'],\n           ['GeForce FX 5200LE', '0323'],\n           ['GeForce FX Go5200', '0324'],\n           ['GeForce FX Go5250', '0325'],\n           ['GeForce FX 5500', '0326'],\n           ['GeForce FX 5100', '0327'],\n           ['GeForce FX Go5200 32M/64M', '0328'],\n           ['Quadro NVS 55/280 PCI', '032A'],\n           ['Quadro FX 500/FX 600', '032B'],\n           ['GeForce FX Go53xx', '032C'],\n           ['GeForce FX Go5100', '032D'],\n           ['GeForce FX 5900 Ultra', '0330'],\n           ['GeForce FX 5900', '0331'],\n           ['GeForce FX 5900XT', '0332'],\n           ['GeForce FX 5950 Ultra', '0333'],\n           ['GeForce FX 5900ZT', '0334'],\n           ['Quadro FX 3000', '0338'],\n           ['Quadro FX 700', '033F'],\n           ['GeForce FX 5700 Ultra', '0341'],\n           ['GeForce FX 5700', '0342'],\n           ['GeForce FX 5700LE', '0343'],\n           ['GeForce FX 5700VE', '0344'],\n           ['GeForce FX Go5700', '0347'],\n           ['GeForce FX Go5700', '0348'],\n           ['Quadro FX Go1000', '034C'],\n           ['Quadro FX 1100', '034E']\n           ]\n    return lst\n\n\ndef getNvidia_96():\n    lst = [\n           ['GeForce2 MX/MX 400', '0110'],\n           ['GeForce2 MX 100/200', '0111'],\n           ['GeForce2 Go', '0112'],\n           ['Quadro2 MXR/EX/Go', '0113'],\n           ['GeForce4 MX 460', '0170'],\n           ['GeForce4 MX 440', '0171'],\n           ['GeForce4 MX 420', '0172'],\n           ['GeForce4 MX 440-SE', '0173'],\n           ['GeForce4 440 Go', '0174'],\n           ['GeForce4 420 Go', '0175'],\n           ['GeForce4 420 Go 32M', '0176'],\n           ['GeForce4 460 Go', '0177'],\n           ['Quadro4 550 XGL', '0178'],\n           ['GeForce4 440 Go 64M', '0179'],\n           ['Quadro NVS 400', '017A'],\n           ['Quadro4 500 GoGL', '017C'],\n           ['GeForce4 410 Go 16M', '017D'],\n           ['GeForce4 MX 440 with AGP8X', '0181'],\n           ['GeForce4 MX 440SE with AGP8X', '0182'],\n           ['GeForce4 MX 420 with AGP8X', '0183'],\n           ['GeForce4 MX 4000', '0185'],\n           ['Quadro4 580 XGL', '0188'],\n           ['Quadro NVS 280 SD', '018A'],\n           ['Quadro4 380 XGL', '018B'],\n           ['Quadro NVS 50 PCI', '018C'],\n           ['GeForce2 Integrated GPU', '01A0'],\n           ['GeForce4 MX Integrated GPU', '01F0'],\n           ['GeForce3', '0200'],\n           ['GeForce3 Ti 200', '0201'],\n           ['GeForce3 Ti 500', '0202'],\n           ['Quadro DCC', '0203'],\n           ['GeForce4 Ti 4600', '0250'],\n           ['GeForce4 Ti 4400', '0251'],\n           ['GeForce4 Ti 4200', '0253'],\n           ['Quadro4 900 XGL', '0258'],\n           ['Quadro4 750 XGL', '0259'],\n           ['Quadro4 700 XGL', '025B'],\n           ['GeForce4 Ti 4800', '0280'],\n           ['GeForce4 Ti 4200 with AGP8X', '0281'],\n           ['GeForce4 Ti 4800 SE', '0282'],\n           ['GeForce4 4200 Go', '0286'],\n           ['Quadro4 980 XGL', '0288'],\n           ['Quadro4 780 
XGL', '0289'],\n ['Quadro4 700 GoGL', '028C']\n ]\n return lst\n\n\ndef getNvidia_71():\n lst = [\n ['RIVA TNT', '0020'],\n ['RIVA TNT2/TNT2 Pro', '0028'],\n ['RIVA TNT2 Ultra', '0029'],\n ['Vanta/Vanta LT', '002C'],\n ['RIVA TNT2 Model 64/Model 64 Pro', '002D'],\n ['Aladdin TNT2', '00A0'],\n ['GeForce 256', '0100'],\n ['GeForce DDR', '0101'],\n ['Quadro', '0103'],\n ['GeForce2 GTS/GeForce2 Pro', '0150'],\n ['GeForce2 Ti', '0151'],\n ['GeForce2 Ultra', '0152'],\n ['Quadro2 Pro', '0153']\n ]\n return lst\n" }, { "alpha_fraction": 0.5812175869941711, "alphanum_fraction": 0.5845845937728882, "avg_line_length": 37.55789566040039, "blob_id": "6ee283f367141ad07daeefb5ab74d38ae47d3d7f", "content_id": "12bb137665982140858894446693284282dcdd3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10989, "license_type": "no_license", "max_line_length": 171, "num_lines": 285, "path": "/usr/lib/device-driver-manager/ddm.py", "repo_name": "linuxer9/device-driver-manager", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\ntry:\n import os\n import sys\n import pygtk\n pygtk.require('2.0')\n import gtk\n import threading\n import glib\n import functions\n import string\n import getopt\n from drivers import DriverCheck, DriverInstall, DriverRemove\n from dialogs import MessageDialog\n from logger import Logger\nexcept Exception, detail:\n print detail\n sys.exit(1)\n\n\n#class for the main window\nclass DebianDriverManager:\n\n version = ''\n hwList = []\n hwPreSelectList = []\n install = False\n debug = False\n mediaDir = '/usr/share/device-driver-manager'\n logPath = ''\n paeChecked = False\n\n def __init__(self):\n # Load window and widgets\n self.builder = gtk.Builder()\n self.builder.add_from_file('/usr/share/device-driver-manager/ddm.glade')\n self.window = self.builder.get_object('ManagerWindow')\n self.lblText = self.builder.get_object('lblText')\n self.tvHardware = self.builder.get_object('tvHardware')\n self.btnInstall = self.builder.get_object('btnInstall')\n self.btnRemove = self.builder.get_object('btnRemove')\n self.btnClose = self.builder.get_object('btnClose')\n self.spinner = self.builder.get_object('spinner')\n self.statusbar = self.builder.get_object('statusbar')\n\n # Add events\n signals = {\n 'on_tvHardware_cursor_changed': self.cursorChanged,\n 'on_btnInstall_clicked': self.installHardware,\n 'on_btnRemove_clicked': self.removeHardware,\n 'on_btnClose_clicked': self.destroy,\n 'on_ManagerWindow_destroy': self.destroy\n }\n self.builder.connect_signals(signals)\n\n self.window.show()\n\n # Fill the hardware tree view\n def fillHardware(self):\n hwFound = False\n contentList = []\n\n # Get a list of supported hardware and images\n self.hwList = DriverCheck(self.log).run()\n hwImgList = functions.getImgsFromDir(self.mediaDir)\n for item in self.hwList:\n hwFound = True\n hwImg = os.path.join(self.mediaDir, 'empty.png')\n install = True\n\n # Check if there is a hardware image available\n for img in hwImgList:\n if item[1] + '.png' in img:\n self.log.write('Hardware image found: ' + img, 'ddm.fillHardware', 'info')\n hwImg = img\n break\n\n # Check the status of the driver\n statImg = os.path.join(self.mediaDir, item[2] + '.png')\n if item[1] in self.hwPreSelectList:\n install = True\n elif item[2] == functions.packageStatus[0] or item[2] == functions.packageStatus[2]:\n install = False\n\n # PAE check\n if item[1] == 'pae' and install:\n self.paeChecked = True\n\n # Add the row to the content list\n self.log.write('Add 
item: ' + item[0], 'ddm.fillHardware', 'info')\n self.log.write('Preselect: ' + str(install), 'ddm.fillHardware', 'debug')\n row = [install, statImg, hwImg, item[0], item[1], item[2]]\n contentList.append(row)\n\n # If nothing found: show message\n if not hwFound:\n columnTypesList = ['str']\n msg = 'No supported hardware detected'\n contentList.append(msg)\n self.log.write(msg, 'ddm.fillHardware', 'warning')\n functions.fillTreeview(self.tvHardware, contentList, columnTypesList)\n self.btnInstall.set_sensitive(False)\n else:\n columnTypesList = ['bool', 'gtk.gdk.Pixbuf', 'gtk.gdk.Pixbuf', 'str', 'str', 'str']\n functions.fillTreeview(self.tvHardware, contentList, columnTypesList, [4, 5])\n\n # Return the value of a given option\n def getValueForOption(self, searchList, option):\n val = ''\n for img in searchList:\n if img[0] == option:\n val = img[1]\n self.log.write('Value found in list: ' + val, 'ddm.getValueForOption', 'debug')\n break\n return val\n\n # Get all the selected hardware drivers and pass this to the hardware driver install program (to be done)\n def handleHardware(self, actionString):\n hwSelected = False\n selHw = []\n chkList = functions.getColumnValues(self.tvHardware, 0)\n hwList = functions.getColumnValues(self.tvHardware, 4)\n statList = functions.getColumnValues(self.tvHardware, 5)\n for i in range(len(chkList)):\n if chkList[i]:\n self.log.write(actionString + ' hardware code: ' + hwList[i], 'ddm.handleHardware', 'info')\n selHw.append([hwList[i], statList[i]])\n hwSelected = True\n\n if hwSelected:\n # Install selected drivers\n self.toggleGuiElements(True)\n # Start saving in a separate thread\n self.log.write('Start driver ' + actionString + ' thread', 'ddm.handleHardware', 'info')\n if actionString == 'install':\n t = DriverInstall(selHw, self.log)\n else:\n t = DriverRemove(selHw, self.log)\n t.start()\n # Run spinner as long as the thread is alive\n self.log.write('Check every 5 seconds if thread is still active', 'ddm.installHardware', 'debug')\n glib.timeout_add(5, self.checkThread, actionString)\n else:\n msg = 'Select a driver to install.'\n MessageDialog('Driver install', msg, gtk.MESSAGE_INFO, self.window).show()\n\n def installHardware(self, widget):\n self.handleHardware('install')\n\n def removeHardware(self, widget):\n self.handleHardware('remove')\n\n def checkThread(self, actionString):\n #print 'Thread count = ' + str(threading.active_count())\n # As long there's a thread active, keep spinning\n if threading.active_count() > 1:\n self.spinner.start()\n return True\n\n # Thread is done: stop spinner and make button sensitive again\n self.hwPreSelectList = []\n self.fillHardware()\n self.toggleGuiElements(False)\n # Show message that we're done\n if actionString == 'install':\n msg = 'Done installing drivers.'\n else:\n msg = 'Done removing drivers.'\n msg += '\\n\\nPlease, reboot your system.'\n MessageDialog('Driver ' + actionString, msg, gtk.MESSAGE_INFO, self.window).show()\n return False\n\n def toggleGuiElements(self, startSave):\n if startSave:\n self.btnInstall.set_sensitive(False)\n self.btnRemove.set_sensitive(False)\n self.btnClose.set_sensitive(False)\n self.tvHardware.set_sensitive(False)\n self.spinner.show()\n self.spinner.start()\n else:\n self.spinner.stop()\n self.spinner.hide()\n self.btnInstall.set_sensitive(True)\n self.btnRemove.set_sensitive(True)\n self.btnClose.set_sensitive(True)\n self.tvHardware.set_sensitive(True)\n\n # Check if PAE is selected\n # PAE must be installed before any other drivers are installed\n def 
cursorChanged(self, treeview):\n colNr = len(self.tvHardware.get_columns())\n if colNr >= 4:\n hwCode = functions.getSelectedValue(self.tvHardware, 4)\n checked = functions.getSelectedValue(self.tvHardware, 0)\n\n if hwCode == 'pae':\n if checked:\n self.paeChecked = True\n if not self.hwPreSelectList:\n msg = 'Install PAE before installing any other drivers.\\n\\nOther drivers are deselected (if any).'\n MessageDialog('PAE install check', msg, gtk.MESSAGE_INFO, self.window).show()\n functions.treeviewToggleAll(self.tvHardware, 0, False, 4, 'pae')\n else:\n self.paeChecked = False\n else:\n if checked:\n if self.paeChecked:\n if not self.hwPreSelectList:\n msg = 'Install PAE before installing any other drivers\\nor deselect PAE to install drivers for the current kernel'\n MessageDialog('PAE install check', msg, gtk.MESSAGE_INFO, self.window).show()\n functions.treeviewToggleAll(self.tvHardware, 0, False, 4, 'pae')\n\n def main(self, argv):\n # Handle arguments\n try:\n opts, args = getopt.getopt(argv, 'ic:dfl:', ['install', 'codes=', 'debug', 'force', 'log='])\n except getopt.GetoptError:\n print 'Arguments cannot be parsed: ' + str(argv)\n sys.exit(2)\n\n for opt, arg in opts:\n if opt in ('-d', '--debug'):\n self.debug = True\n elif opt in ('-i', '--install'):\n self.install = True\n elif opt in ('-c', '--codes'):\n self.hwPreSelectList = arg.split(',')\n elif opt in ('-l', '--log'):\n self.logPath = arg\n\n # Initialize logging\n if self.debug:\n if self.logPath == '':\n self.logPath = 'ddm.log'\n self.log = Logger(self.logPath, 'debug', True, self.statusbar, self.window)\n functions.log = self.log\n\n # Set initial values\n self.lblText.set_text('Currently Nvidia, ATI and Broadcom drivers are supported and it detects multi-core 32-bit systems so that the PAE kernel can be installed.')\n\n # Show message that we're busy\n self.btnInstall.set_sensitive(False)\n self.btnRemove.set_sensitive(False)\n msg = 'Checking your hardware...'\n self.log.write(msg, 'ddm.main', 'info')\n functions.pushMessage(self.statusbar, msg)\n functions.repaintGui()\n\n # Fill hardware list\n self.fillHardware()\n self.btnInstall.set_sensitive(True)\n self.btnRemove.set_sensitive(True)\n\n # Show version number in status bar\n self.version = functions.getPackageVersion('device-driver-manager')\n functions.pushMessage(self.statusbar, self.version)\n\n # Start automatic install\n if self.install:\n self.log.write('Start automatic driver install', 'ddm.main', 'info')\n self.installHardware(None)\n\n # Show window and keep it on top of other windows\n self.window.set_keep_above(True)\n gtk.main()\n\n def destroy(self, widget, data=None):\n # Close the app\n gtk.main_quit()\n\n\nif __name__ == '__main__':\n # Flush print when it's called\n sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)\n # Create an instance of our GTK application\n app = DebianDriverManager()\n\n # Very dirty: replace the : back again with -\n # before passing the arguments\n args = sys.argv[1:]\n for i in range(len(args)):\n args[i] = string.replace(args[i], ':', '-')\n app.main(args)\n" }, { "alpha_fraction": 0.5642657279968262, "alphanum_fraction": 0.5731738209724426, "avg_line_length": 42.89944076538086, "blob_id": "049a70c304fb8473a2315f9149bf4a18018110b5", "content_id": "fdf5c576c9f47c2299342c471181c1ccf9efcf6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7858, "license_type": "no_license", "max_line_length": 178, "num_lines": 179, "path": 
"/usr/lib/device-driver-manager/ati.py", "repo_name": "linuxer9/device-driver-manager", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport re\nimport functions\nfrom execcmd import ExecCmd\n\npackageStatus = ['installed', 'notinstalled', 'uninstallable']\nhwCodes = ['nvidia', 'ati', 'broadcom', 'pae', 'mirror']\natiStartSerie = 5000\n\n\nclass ATI():\n\n def __init__(self, distribution, loggerObject):\n self.distribution = distribution.lower()\n self.log = loggerObject\n self.ec = ExecCmd(self.log)\n self.hw = functions.getGraphicsCard()\n\n # Test\n #self.hw = '01:00.0 VGA compatible controller [0300]: Advanced Micro Devices [AMD] nee ATI Manhattan [Mobility Radeon HD 5400 Series] [1002:68e0]'\n\n # Called from drivers.py: Check for ATI\n def getATI(self):\n # Check for ATI cards\n hwList = []\n # Is it ATI?\n nvChk = re.search('\\\\b' + hwCodes[1] + '\\\\b', self.hw.lower())\n if nvChk:\n self.log.write('ATI card found: ' + self.hw, 'ati.getATI', 'info')\n # Get the ATI chip set serie\n atiSerie = re.search('\\s\\d{4,}', self.hw)\n if atiSerie:\n self.log.write('ATI chip serie found: ' + atiSerie.group(0), 'ati.getATI', 'info')\n intSerie = functions.strToNumber(atiSerie.group(0))\n # Only add series from atiStartSerie\n if intSerie >= atiStartSerie:\n drv = self.getDriver()\n status = functions.getPackageStatus(drv)\n self.log.write('ATI ' + drv + ' status: ' + status, 'ati.getATI', 'debug')\n hwList.append([self.hw, hwCodes[1], status])\n else:\n self.log.write('ATI chip serie not supported: ' + str(intSerie), 'ati.getATI', 'warning')\n hwList.append([self.hw, hwCodes[1], packageStatus[2]])\n else:\n self.log.write('No ATI chip serie found: ' + self.hw, 'ati.getATI', 'warning')\n hwList.append([self.hw, hwCodes[1], packageStatus[2]])\n\n return hwList\n\n # Check distribution and get appropriate driver\n def getDriver(self):\n drv = ''\n if self.distribution == 'debian':\n drv = 'fglrx-driver'\n else:\n drv = 'fglrx'\n return drv\n\n # Get additional packages\n # The second value in the list is a numerical value:\n # 0 = Need to install, but removal before reinstallation is not needed\n # 1 = Need to install and removal is needed before reinstallation\n # 2 = Optional install\n def getAdditionalPackages(self, driver):\n drvList = []\n # Get the correct linux header package\n linHeader = functions.getLinuxHeadersAndImage()\n drvList.append([linHeader[0], 0])\n # Common packages\n if self.distribution == 'debian':\n drvList.append(['build-essential', 0])\n drvList.append(['module-assistant', 0])\n drvList.append([driver, 1])\n drvList.append(['fglrx-modules-dkms', 1])\n drvList.append(['libgl1-fglrx-glx', 1])\n drvList.append(['glx-alternative-fglrx', 0])\n drvList.append(['fglrx-control', 1])\n drvList.append(['fglrx-glx-ia32', 2])\n else:\n drvList.append([driver, 1])\n drvList.append(['fglrx-amdcccle', 1])\n return drvList\n\n # Install the given packages\n def installATIDriver(self, packageList):\n try:\n # Remove certain packages before installing the drivers\n for package in packageList:\n if package[1] == 1:\n if functions.isPackageInstalled(package[0]):\n self.log.write('Remove package: ' + package[0], 'ati.installATIDriver', 'debug')\n self.ec.run('apt-get -y --force-yes remove ' + package[0])\n\n # Preseed answers for some packages\n self.preseedATIPackages('install')\n\n # Install the packages\n installString = ''\n notInRepo = ''\n for package in packageList:\n chkStatus = functions.getPackageStatus(package[0])\n if chkStatus != packageStatus[2]:\n 
installString += ' ' + package[0]\n                elif package[1] != 2:\n                    notInRepo += ', ' + package[0]\n\n            if notInRepo == '':\n                self.ec.run('apt-get -y --force-yes install' + installString)\n            else:\n                self.log.write('Install aborted: not in repository: ' + notInRepo[2:], 'ati.installATIDriver', 'error')\n\n        except Exception, detail:\n            self.log.write(detail, 'ati.installATIDriver', 'exception')\n\n    # Called from drivers.py: install the ATI drivers\n    def installATI(self):\n        try:\n            # Install the driver and create xorg.conf\n            drv = self.getDriver()\n            if drv != '':\n                self.log.write('ATI driver to install: ' + drv, 'ati.installATI', 'info')\n                packages = self.getAdditionalPackages(drv)\n                self.installATIDriver(packages)\n                # Configure ATI\n                xorg = '/etc/X11/xorg.conf'\n                if os.path.exists(xorg):\n                    self.log.write('Copy ' + xorg + ' to ' + xorg + '.ddm.bak', 'ati.installATI', 'info')\n                    self.ec.run('cp ' + xorg + ' ' + xorg + '.ddm.bak')\n                self.log.write('Configure ATI', 'ati.installATI', 'debug')\n                self.ec.run('aticonfig --initial -f')\n\n            self.log.write('Done installing ATI drivers', 'ati.installATI', 'info')\n\n        except Exception, detail:\n            self.log.write(detail, 'ati.installATI', 'exception')\n\n    # Called from drivers.py: remove the ATI drivers and revert to Nouveau\n    def removeATI(self):\n        try:\n            self.log.write('Remove ATI drivers: fglrx', 'ati.removeATI', 'debug')\n\n            # Preseed answers for some packages\n            self.preseedATIPackages('purge')\n\n            self.ec.run('apt-get -y --force-yes purge fglrx*')\n            self.ec.run('apt-get -y --force-yes autoremove')\n            self.ec.run('apt-get -y --force-yes install xserver-xorg-video-radeon xserver-xorg-video-nouveau xserver-xorg-video-ati libgl1-mesa-glx libgl1-mesa-dri libglu1-mesa')\n\n            # Rename xorg.conf\n            xorg = '/etc/X11/xorg.conf'\n            if os.path.exists(xorg):\n                self.log.write('Rename : ' + xorg + ' -> ' + xorg + '.ddm.bak', 'ati.removeATI', 'debug')\n                os.rename(xorg, xorg + '.ddm.bak')\n\n            self.log.write('Done removing ATI drivers', 'ati.removeATI', 'info')\n\n        except Exception, detail:\n            self.log.write(detail, 'ati.removeATI', 'exception')\n\n    def preseedATIPackages(self, action):\n        if self.distribution == 'debian':\n            # Run on configured system and debconf-utils installed:\n            # debconf-get-selections | grep fglrx > debconf-fglrx.seed\n            # replace tabs with spaces and change the default answers (note=space, boolean=true or false)\n            debConfList = []\n            debConfList.append('libfglrx fglrx-driver/check-for-unsupported-gpu boolean false')\n            debConfList.append('fglrx-driver fglrx-driver/check-xorg-conf-on-removal boolean false')\n            debConfList.append('libfglrx fglrx-driver/install-even-if-unsupported-gpu-exists boolean false')\n            debConfList.append('fglrx-driver fglrx-driver/removed-but-enabled-in-xorg-conf note ')\n            debConfList.append('fglrx-driver fglrx-driver/needs-xorg-conf-to-enable note ')\n\n            # Add each line to the debconf database\n            for line in debConfList:\n                os.system('echo \"' + line + '\" | debconf-set-selections')\n\n            # Install or remove the packages\n            self.ec.run('apt-get -y --force-yes ' + action + ' libfglrx fglrx-driver')\n" }, { "alpha_fraction": 0.5488836169242859, "alphanum_fraction": 0.5577715039253235, "avg_line_length": 45.130001068115234, "blob_id": "97bb45d3c428ca5c83753715852e29c437c72fe1", "content_id": "9a146e91ed8410cf1eb649920d85a44c6829f28d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4613, "license_type": "no_license", "max_line_length": 166, "num_lines": 100, "path": 
"/usr/lib/device-driver-manager/pae.py", "repo_name": "linuxer9/device-driver-manager", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport functions\nfrom execcmd import ExecCmd\n\npackageStatus = ['installed', 'notinstalled', 'uninstallable']\nhwCodes = ['nvidia', 'ati', 'broadcom', 'pae', 'mirror']\n\n\nclass PAE():\n\n def __init__(self, distribution, loggerObject):\n self.distribution = distribution.lower()\n self.distributionReleaseNumber = functions.getDistributionReleaseNumber()\n self.log = loggerObject\n self.ec = ExecCmd(self.log)\n self.packages = functions.getLinuxHeadersAndImage(True, 'pae$', '-rt')\n\n # Check if the PAE kernel can be installed\n def getPae(self):\n hwList = []\n\n # Ubuntu is already PAE enabled from version 12.10 (Quantal) and LM 14 Nadia is based on Quantal: no need to check\n # https://help.ubuntu.com/community/EnablingPAE\n self.log.write('Distribution: ' + self.distribution + ' ' + str(self.distributionReleaseNumber), 'pae.getPae', 'debug')\n skipPae = False\n if (self.distribution == 'linuxmint' and self.distributionReleaseNumber >= 14) or (self.distribution == 'ubuntu' and self.distributionReleaseNumber >= 12.10):\n skipPae = True\n\n if not skipPae:\n # Get the kernel release\n kernelRelease = self.ec.run('uname -r')\n # Check the machine hardware\n machine = self.ec.run('uname -m')\n\n if not 'amd64' in kernelRelease[0]:\n if not 'pae' in kernelRelease[0]:\n self.log.write('Single-core kernel found: ' + kernelRelease[0], 'pae.getPae', 'debug')\n\n # Get #CPU's: cat /proc/cpuinfo | grep processor | wc -l\n if machine[0] == 'i686':\n self.log.write('Multi-core system running single-core kernel found', 'pae.getPae', 'info')\n # Check package status\n status = packageStatus[0]\n for package in self.packages:\n if not functions.isPackageInstalled(package):\n self.log.write('PAE not installed', 'pae.getPae', 'info')\n status = packageStatus[1]\n break\n hwList.append(['Multi-core support for 32-bit systems', hwCodes[3], status])\n elif machine[0] == 'x86_64':\n self.log.write('PAE skipped: 64-bit system', 'pae.getPae', 'debug')\n else:\n self.log.write('PAE kernel cannot be installed: single-core system', 'pae.getPae', 'warning')\n\n else:\n self.log.write('Multi-core already installed: ' + kernelRelease[0], 'pae.getPae', 'info')\n hwList.append(['Multi-core support for 32-bit systems', hwCodes[3], packageStatus[0]])\n\n return hwList\n\n # Called from drivers.py: install PAE kernel\n def installPAE(self):\n try:\n cmdPae = 'apt-get -y --force-yes install'\n for package in self.packages:\n cmdPae += ' ' + package\n self.log.write('PAE kernel install command: ' + cmdPae, 'pae.installPAE', 'debug')\n self.ec.run(cmdPae)\n\n # Rename xorg.conf\n xorg = '/etc/X11/xorg.conf'\n if os.path.exists(xorg):\n self.log.write('Rename : ' + xorg + ' -> ' + xorg + '.ddm.bak', 'pae.installPAE', 'debug')\n os.rename(xorg, xorg + '.ddm.bak')\n\n self.log.write('Done installing PAE', 'pae.installPAE', 'info')\n\n except Exception, detail:\n self.log.write(detail, 'pae.installPAE', 'error')\n\n # Called from drivers.py: remove the PAE kernel\n # TODO: I don't think this is going to work - test this\n def removePAE(self):\n try:\n kernelRelease = self.ec.run('uname -r')\n if not 'pae' in kernelRelease[0]:\n self.log.write('Not running pae, continue removal', 'pae.removePAE', 'debug')\n for package in self.packages:\n cmdPurge = 'apt-get -y --force-yes purge ' + package\n self.log.write('PAE package to remove: ' + package, 'pae.removePAE', 'info')\n 
self.ec.run(cmdPurge)\n self.ec.run('apt-get -y --force-yes autoremove')\n self.log.write('Done removing PAE', 'pae.removePAE', 'info')\n else:\n self.log.write('Cannot remove PAE when running PAE', 'pae.removePAE', 'warning')\n\n except Exception, detail:\n self.log.write(detail, 'pae.removePAE', 'error')\n" }, { "alpha_fraction": 0.5472872257232666, "alphanum_fraction": 0.5722779631614685, "avg_line_length": 49.77358627319336, "blob_id": "52970ae6d5402e0ac0d0c52f18a5c310bbadd9f6", "content_id": "e8faba1c0b0acdde9a5aa8af56d11775eb23cac3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10764, "license_type": "no_license", "max_line_length": 155, "num_lines": 212, "path": "/usr/lib/device-driver-manager/broadcom.py", "repo_name": "linuxer9/device-driver-manager", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport re\nimport functions\nfrom execcmd import ExecCmd\n\npackageStatus = ['installed', 'notinstalled', 'uninstallable']\nhwCodes = ['nvidia', 'ati', 'broadcom', 'pae', 'mirror']\nblacklistPath = '/etc/modprobe.d/blacklist-broadcom.conf'\n\n# Chipsets and corresponding packages\n# http://linuxwireless.org/en/users/Drivers/b43\n# https://help.ubuntu.com/community/WifiDocs/Driver/bcm43xx#b43%20-%20Internet%20access\n# [ChipID, DebianPackage, UbuntuPackage]\nbcChips = [\n['576', 'firmware-brcm80211', 'bcmwl-kernel-source'],\n['4301', 'firmware-b43legacy-installer', 'firmware-b43legacy-installer'],\n['4306', 'firmware-b43legacy-installer', 'firmware-b43legacy-installer'],\n['4307', 'firmware-b43-installer', 'firmware-b43-installer'],\n['4311', 'firmware-b43-installer', 'firmware-b43-installer'],\n['4312', 'firmware-b43-installer', 'firmware-b43-installer'], # This is not a BCM4312 but BCM4311\n['4313', 'broadcom-sta-dkms', 'bcmwl-kernel-source'],\n['4315', 'firmware-b43-lpphy-installer', 'firmware-b43-lpphy-installer'], # This is BCM4312\n['4318', 'firmware-b43-installer', 'firmware-b43-installer'],\n['4319', 'firmware-b43-installer', 'firmware-b43-installer'],\n['4320', 'firmware-b43-installer', 'firmware-b43-installer'],\n['4321', 'firmware-b43-installer', 'firmware-b43-installer'],\n['4324', 'firmware-b43-installer', 'firmware-b43-installer'],\n['4325', 'firmware-b43legacy-installer', 'firmware-b43legacy-installer'],\n['4328', 'broadcom-sta-dkms', 'bcmwl-kernel-source'],\n['4329', 'broadcom-sta-dkms', 'bcmwl-kernel-source'],\n['432a', 'broadcom-sta-dkms', 'bcmwl-kernel-source'],\n['432b', 'broadcom-sta-dkms', 'bcmwl-kernel-source'],\n['432c', 'broadcom-sta-dkms', 'bcmwl-kernel-source'], # Better to use firmware-b43-installer?\n['432d', 'broadcom-sta-dkms', 'bcmwl-kernel-source'],\n['4331', 'firmware-b43-installer', 'firmware-b43-installer'],\n['4353', 'firmware-brcm80211', 'bcmwl-kernel-source'],\n['4357', 'firmware-brcm80211', 'bcmwl-kernel-source'],\n['4358', 'broadcom-sta-dkms', 'bcmwl-kernel-source'],\n['4359', 'broadcom-sta-dkms', 'bcmwl-kernel-source'],\n['435a', 'broadcom-sta-dkms', 'bcmwl-kernel-source'],\n['4727', 'firmware-brcm80211', 'bcmwl-kernel-source'], # May need blacklisting b43 on some kernels (up to 3.2?)\n['a8d6', 'firmware-b43-installer', 'firmware-b43-installer'], # Untested, but the other drivers have no support at all\n['a99d', 'broadcom-sta-dkms', 'bcmwl-kernel-source']\n]\n\n\nclass Broadcom():\n\n def __init__(self, distribution, loggerObject):\n self.distribution = distribution.lower()\n self.log = loggerObject\n self.ec = ExecCmd(self.log)\n self.status = ''\n 
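# remaining chip/driver state is filled in by setCurrentChipInfo()\n        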
self.currentChip = ''\n self.installableChip = ''\n self.installableDriver = ''\n self.hw = ''\n\n # Called from drivers.py: Check for Broadcom\n def getBroadcom(self):\n hwList = []\n self.setCurrentChipInfo()\n if self.hw != '':\n if self.currentChip != '':\n if self.installableChip != '':\n self.log.write('Broadcom chip serie found: ' + self.installableChip, 'broadcom.getBroadcom', 'info')\n hwList.append([self.hw, hwCodes[2], self.status])\n else:\n # Broadcom was found, but no supported chip set: return uninstallable\n hwList.append([self.hw, hwCodes[2], packageStatus[2]])\n else:\n # Broadcom was found, but no chip set was found: return uninstallable\n hwList.append([self.hw, hwCodes[2], packageStatus[2]])\n\n return hwList\n\n # Check for Broadcom chip set and set variables\n def setCurrentChipInfo(self):\n self.currentChip = ''\n self.installableDriver = ''\n self.status = ''\n\n # Get Broadcom info\n cmdBc = 'lspci | grep Broadcom'\n hwBc = self.ec.run(cmdBc)\n if hwBc:\n self.hw = hwBc[0][hwBc[0].find(': ') + 2:]\n self.log.write('Broadcom found: ' + self.hw, 'broadcom.setCurrentChipInfo', 'info')\n # Get the chip set number\n cmdPciId = 'lspci -n -d 14e4:'\n pciId = self.ec.run(cmdPciId)\n if pciId:\n chipSet = re.search('14e4:([a-zA-Z0-9]*)', pciId[0])\n if chipSet:\n self.currentChip = chipSet.group(1)\n self.log.write('Broadcom chip set found: ' + self.currentChip, 'broadcom.setCurrentChipInfo', 'debug')\n for chipList in bcChips:\n if self.currentChip == chipList[0]:\n # Supported chipset found: set variables\n self.installableChip = chipList[0]\n if self.distribution == 'debian':\n self.installableDriver = chipList[1]\n # Check if you already have wireless\n if functions.hasWireless():\n self.status = packageStatus[0]\n else:\n self.status = functions.getPackageStatus(chipList[1])\n else:\n # Assume Ubuntu\n self.installableDriver = chipList[2]\n if functions.hasWireless():\n self.status = packageStatus[0]\n else:\n self.status = functions.getPackageStatus(chipList[2])\n\n self.log.write('Broadcom driver: ' + self.installableDriver + ' (' + self.status + ')', 'broadcom.setCurrentChipInfo', 'debug')\n break\n # Check if a supported chip set is found\n if self.installableChip == '':\n self.log.write('Broadcom chipset not supported or ethernet controller: ' + self.hw, 'broadcom.setCurrentChipInfo', 'warning')\n else:\n self.log.write('Broadcom chipset not found: ' + pciId[0], 'broadcom.setCurrentChipInfo', 'warning')\n else:\n self.log.write('Broadcom pci ID not found: ' + self.hw, 'broadcom.setCurrentChipInfo', 'warning')\n\n # Install the broadcom drivers\n def installBroadcom(self):\n try:\n self.setCurrentChipInfo()\n if self.installableDriver != '':\n # Get the correct linux header package\n linHeader = functions.getLinuxHeadersAndImage()\n self.log.write('Linux header name to install: ' + linHeader[0], 'broadcom.installBroadcom', 'info')\n\n # Only install linux header if it is not installed\n if not functions.isPackageInstalled(linHeader[0]):\n self.log.write('Download package: ' + linHeader[0], 'broadcom.installBroadcom', 'info')\n self.ec.run('apt-get download ' + linHeader[0])\n\n # Download the driver and its dependencies\n cmdBc = 'apt-get download ' + self.installableDriver\n self.log.write('Download package: ' + self.installableDriver, 'broadcom.installBroadcom', 'info')\n self.ec.run(cmdBc)\n depList = functions.getPackageDependencies(self.installableDriver)\n for dep in depList:\n if not functions.isPackageInstalled(dep):\n cmdDep = 'apt-get download ' + dep\n 
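# apt-get download only fetches the .deb; all fetched packages are installed together later with dpkg -i\n                        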
self.log.write('Download package dependency: ' + dep, 'broadcom.installBroadcom', 'debug')\n                        self.ec.run(cmdDep)\n\n                # Remove any module that might be in the way\n                self.log.write('modprobe b44, b43, b43legacy, ssb, brcmsmac', 'broadcom.installBroadcom', 'debug')\n                os.system('modprobe -rf b44')\n                os.system('modprobe -rf b43')\n                os.system('modprobe -rf b43legacy')\n                os.system('modprobe -rf ssb')\n                os.system('modprobe -rf brcmsmac')\n\n                # Install the downloaded packages\n                self.log.write('Install downloaded packages', 'broadcom.installBroadcom', 'info')\n                self.ec.run('dpkg -i *.deb')\n                # Delete the downloaded packages\n                self.log.write('Remove downloaded debs', 'broadcom.installBroadcom', 'debug')\n                os.system('rm -f *.deb')\n\n                # Finish up\n                if self.installableDriver == 'broadcom-sta-dkms' or self.installableDriver == 'bcmwl-kernel-source':\n                    # Blacklist b43, brcmsmac\n                    self.log.write('blacklist b43 brcmsmac bcma ssb', 'broadcom.installBroadcom', 'debug')\n                    modFile = open(blacklistPath, 'w')\n                    modFile.write('blacklist b43 brcmsmac bcma ssb')\n                    modFile.close()\n                    # Start wl\n                    self.log.write('modprobe wl', 'broadcom.installBroadcom', 'debug')\n                    os.system('modprobe wl')\n                elif 'b43' in self.installableDriver:\n                    # Start b43\n                    self.log.write('modprobe b43', 'broadcom.installBroadcom', 'debug')\n                    os.system('modprobe b43')\n                else:\n                    # Start brcmsmac\n                    self.log.write('modprobe brcmsmac', 'broadcom.installBroadcom', 'debug')\n                    os.system('modprobe brcmsmac')\n\n                self.log.write('Done installing Broadcom drivers', 'broadcom.installBroadcom', 'info')\n            else:\n                self.log.write('No Broadcom chip set found', 'broadcom.installBroadcom', 'error')\n\n        except Exception, detail:\n            self.log.write(detail, 'broadcom.installBroadcom', 'exception')\n\n    # Remove the broadcom drivers\n    def removeBroadcom(self):\n        try:\n            self.setCurrentChipInfo()\n            if self.installableDriver != '':\n                self.log.write('Purge driver: ' + self.installableDriver, 'broadcom.removeBroadcom', 'info')\n                cmdPurge = 'apt-get -y --force-yes purge ' + self.installableDriver\n                self.ec.run(cmdPurge)\n                self.ec.run('apt-get -y --force-yes autoremove')\n\n                # Remove the Broadcom blacklist file\n                if os.path.exists(blacklistPath):\n                    self.log.write('Remove : ' + blacklistPath, 'broadcom.removeBroadcom', 'debug')\n                    os.remove(blacklistPath)\n\n                self.log.write('Done removing Broadcom drivers', 'broadcom.removeBroadcom', 'info')\n\n        except Exception, detail:\n            self.log.write(detail, 'broadcom.removeBroadcom', 'exception')\n" }, { "alpha_fraction": 0.569524884223938, "alphanum_fraction": 0.5712630152702332, "avg_line_length": 43.25640869140625, "blob_id": "334b4587c21dabd6f12d8fca736427e3f84a8b2a", "content_id": "68b61ed38787deef3d935301c8e03dd626e4f525", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3452, "license_type": "no_license", "max_line_length": 135, "num_lines": 78, "path": "/usr/lib/device-driver-manager/mirror.py", "repo_name": "linuxer9/device-driver-manager", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport re\nimport functions\nfrom execcmd import ExecCmd\n\npackageStatus = ['installed', 'notinstalled', 'uninstallable']\nhwCodes = ['nvidia', 'ati', 'broadcom', 'pae', 'mirror']\n\n\nclass Mirror():\n    def __init__(self, distribution, loggerObject, currentMirror='', bestMirror=''):\n        self.distribution = distribution.lower()\n        self.log = loggerObject\n        self.ec = ExecCmd(self.log)\n        self.currentMirror = currentMirror\n        self.bestMirror = bestMirror\n\n    def getFastestMirror(self):\n        
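# dry-run mint's mirror tool and parse the current/best server URLs from its output\n        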
mirList = []\n if self.distribution == 'debian':\n # Check if mint-debian-mirrors is installed\n if functions.isPackageInstalled('mint-debian-mirrors'):\n # Get the mirrors\n cmd = 'mint-choose-debian-mirror --dry-run'\n mirrors = self.ec.run(cmd)\n for mirror in mirrors:\n # Extract the url\n urlObj = re.search('http[a-zA-Z0-9:\\.\\-/]*', mirror)\n if urlObj:\n url = urlObj.group()\n if 'current server' in mirror.lower():\n self.log.write('Current server: ' + url, 'mirror.getFastestMirror', 'info')\n self.currentMirror = url\n elif 'best server' in mirror.lower():\n self.log.write('Best server: ' + url, 'mirror.getFastestMirror', 'info')\n self.bestMirror = url\n else:\n self.log.write('No mirror URL found', 'mirror.getFastestMirror', 'warning')\n else:\n self.log.write('Cannot detect fastest mirror: mint-debian-mirrors not installed', 'mirror.getFastestMirror', 'warning')\n else:\n # TODO: do this for Ubuntu\n pass\n\n # Append fastest mirror to list\n status = packageStatus[2]\n if self.bestMirror != '':\n if self.bestMirror == self.currentMirror:\n status = packageStatus[0]\n else:\n status = packageStatus[1]\n mirList.append(['Install the fastest repository mirror', hwCodes[4], status])\n\n return mirList\n\n # Let mint-debian-mirrors write the fastest mirror to sources.list\n def installMirror(self):\n cmd = 'mint-choose-debian-mirror --force-fastest'\n self.log.write('Mirror command=' + cmd, 'mirror.installMirror', 'debug')\n self.ec.run(cmd)\n self.log.write('Resynchronizing the package index files from their sources', 'mirror.installMirror', 'info')\n os.system(\"apt-get update\")\n\n # Restore the sources.list backup file\n def removeMirror(self):\n sourcesFile = '/etc/apt/sources.list'\n bakFile = '/etc/apt/sources.list.bk'\n if os.path.exists(bakFile):\n self.log.write('Restore backup file: ' + bakFile, 'mirror.removeMirror', 'info')\n if os.path.exists(sourcesFile):\n self.ec.run('mv -fv ' + sourcesFile + ' ' + sourcesFile + '.ddm.bk')\n self.ec.run('mv -fv ' + bakFile + ' ' + sourcesFile)\n self.log.write('Resynchronize the package index files from their sources', 'mirror.removeMirror', 'info')\n self.ec.run('apt-get update')\n else:\n self.log.write('Cannot restore sources.list backup file: does not exist', 'mirror.removeMirror', 'warning')\n" }, { "alpha_fraction": 0.7113401889801025, "alphanum_fraction": 0.7113401889801025, "avg_line_length": 18, "blob_id": "d2e93b5e7901e4445ded9ed57fe08263d94831ed", "content_id": "042eec7dce883aa19970ec70c81c0506e07f3487", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 97, "license_type": "no_license", "max_line_length": 42, "num_lines": 5, "path": "/test", "repo_name": "linuxer9/device-driver-manager", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nsudo rm -rf /usr/lib/device-driver-manager\nsudo cp -R usr /\ndevice-driver-manager\n\n\n" }, { "alpha_fraction": 0.5853658318519592, "alphanum_fraction": 0.5936354994773865, "avg_line_length": 44.708133697509766, "blob_id": "ef6c7e9eb2ef54f34b5cb60bae689c8905a2e2fa", "content_id": "3ef8a760193229481b6f8dfdaa9c1164823dfa98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9553, "license_type": "no_license", "max_line_length": 125, "num_lines": 209, "path": "/usr/lib/device-driver-manager/nvidia.py", "repo_name": "linuxer9/device-driver-manager", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport functions\nimport nvidia_gpus\nfrom execcmd import 
ExecCmd\n\npackageStatus = ['installed', 'notinstalled', 'uninstallable']\nhwCodes = ['nvidia', 'ati', 'broadcom', 'pae', 'mirror']\nblacklistPath = '/etc/modprobe.d/blacklist-nouveau.conf'\n\n# Nvidia drivers\n# driver serial nr, debian driver, ubuntu driver\ndrivers = [\n[304, 'nvidia-glx', 'nvidia-current'],\n[173, 'nvidia-glx-legacy-173xx', 'nvidia-173'],\n[96, 'nvidia-glx-legacy-96xx', 'nvidia-96']\n]\n\n\nclass Nvidia():\n    def __init__(self, distribution, loggerObject):\n        self.distribution = distribution.lower()\n        self.log = loggerObject\n        self.ec = ExecCmd(self.log)\n        # Get gpu info\n        self.gpu = []\n        manPciId = functions.getGraphicsCardManufacturerPciId()\n        if manPciId:\n            if manPciId[0].lower() == '10de': # Nvidia manufacturer id\n                self.gpu = nvidia_gpus.checkNvidiaID(manPciId[1])\n                self.log.write('Nvidia driver info: ' + str(self.gpu), 'nvidia.init', 'debug')\n\n    # Called from drivers.py: Check for Nvidia\n    def getNvidia(self):\n        hwList = []\n        if self.gpu:\n            # Get driver for Nvidia\n            self.log.write('Get the appropriate Nvidia driver', 'nvidia.getNvidia', 'info')\n            drv = self.getDriver()\n            if drv != '':\n                self.log.write('Nvidia driver to install: ' + drv, 'nvidia.getNvidia', 'info')\n                status = functions.getPackageStatus(drv)\n                self.log.write('Package status: ' + status, 'nvidia.getNvidia', 'debug')\n                hwList.append([self.gpu[1], hwCodes[0], status])\n            else:\n                self.log.write('No supported driver found for: ' + self.gpu[1], 'nvidia.getNvidia', 'warning')\n                hwList.append([self.gpu[1], hwCodes[0], packageStatus[2]])\n        else:\n            self.log.write('No supported Nvidia card found', 'nvidia.getNvidia', 'debug')\n\n        return hwList\n\n    # Get the driver for the system's Nvidia card\n    def getDriver(self):\n        try:\n            driver = ''\n            for drv in drivers:\n                if drv[0] == self.gpu[0]:\n                    if self.distribution == 'debian':\n                        driver = drv[1]\n                    else:\n                        driver = drv[2]\n                    break\n            return driver\n        except Exception, detail:\n            self.log.write(detail, 'nvidia.getDriver', 'exception')\n\n    # Install the given packages\n    def installNvidiaDriver(self, packageList):\n        try:\n            # Remove certain packages before installing the drivers\n            for package in packageList:\n                if package[1] == 1:\n                    if functions.isPackageInstalled(package[0]):\n                        self.log.write('Remove package: ' + package[0], 'nvidia.installNvidiaDriver', 'debug')\n                        self.ec.run('apt-get -y --force-yes remove ' + package[0])\n\n            # Preseed answers for some packages\n            self.preseedNvidiaPackages('install')\n\n            # Install the packages\n            installString = ''\n            notInRepo = ''\n            for package in packageList:\n                chkStatus = functions.getPackageStatus(package[0])\n                if chkStatus != packageStatus[2]:\n                    installString += ' ' + package[0]\n                elif package[1] != 2:\n                    notInRepo += ', ' + package[0]\n\n            if notInRepo == '':\n                self.ec.run('apt-get -y --force-yes install' + installString)\n            else:\n                self.log.write('Install aborted: not in repository: ' + notInRepo[2:], 'nvidia.installNvidiaDriver', 'error')\n\n        except Exception, detail:\n            self.log.write(detail, 'nvidia.installNvidiaDriver', 'exception')\n\n    # Get additional packages\n    # The second value in the list is a numerical value:\n    # 0 = Need to install, but removal before reinstallation is not needed\n    # 1 = Need to install and removal is needed before reinstallation\n    # 2 = Optional install\n    def getAdditionalPackages(self, driver):\n        drvList = []\n        # Get the correct linux header package\n        linHeader = functions.getLinuxHeadersAndImage()\n        drvList.append([linHeader[0], 0])\n        # Distribution specific packages\n        if self.distribution == 'debian':\n            
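# Debian needs the build chain plus the DKMS module package that matches the chosen driver\n            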
drvList.append(['build-essential', 0])\n            drvList.append([driver, 1])\n            if driver == 'nvidia-glx':\n                drvList.append(['nvidia-kernel-dkms', 1])\n            elif driver == 'nvidia-glx-legacy-96xx':\n                drvList.append(['nvidia-kernel-legacy-96xx-dkms', 1])\n            elif driver == 'nvidia-glx-legacy-173xx':\n                drvList.append(['nvidia-kernel-legacy-173xx-dkms', 1])\n            drvList.append(['nvidia-xconfig', 0])\n            drvList.append(['nvidia-glx-ia32', 2])\n        else:\n            drvList.append([driver, 1])\n\n        # Common packages\n        drvList.append(['nvidia-settings', 0])\n        return drvList\n\n    # Called from drivers.py: install the Nvidia drivers\n    def installNvidia(self):\n        try:\n            # Get the appropriate drivers for the card\n            drv = self.getDriver()\n            if drv != '':\n                packages = self.getAdditionalPackages(drv)\n                self.installNvidiaDriver(packages)\n                # Install the appropriate drivers\n                if self.distribution == 'debian':\n                    # Configure Nvidia\n                    self.log.write('Configure Nvidia...', 'nvidia.installNvidia', 'debug')\n                    self.ec.run('nvidia-xconfig')\n\n                # Blacklist Nouveau\n                self.log.write('Blacklist Nouveau: ' + blacklistPath, 'nvidia.installNvidia', 'debug')\n                modFile = open(blacklistPath, 'w')\n                modFile.write('blacklist nouveau')\n                modFile.close()\n\n                self.log.write('Done installing Nvidia drivers', 'nvidia.installNvidia', 'info')\n            else:\n                self.log.write('No appropriate driver found', 'nvidia.installNvidia', 'error')\n\n        except Exception, detail:\n            self.log.write(detail, 'nvidia.installNvidia', 'exception')\n\n    # Called from drivers.py: remove the Nvidia drivers and revert to Nouveau\n    def removeNvidia(self):\n        try:\n            # Preseed answers for some packages\n            self.preseedNvidiaPackages('purge')\n\n            self.log.write('Removing Nvidia drivers', 'nvidia.removeNvidia', 'info')\n            packages = self.getAdditionalPackages(self.getDriver())\n            for package in packages:\n                self.log.write('Remove package: ' + package[0], 'nvidia.removeNvidia', 'debug')\n                self.ec.run('apt-get -y --force-yes purge ' + package[0])\n            self.ec.run('apt-get -y --force-yes autoremove')\n            self.ec.run('apt-get -y --force-yes install xserver-xorg-video-nouveau')\n\n            # Remove blacklist Nouveau\n            if os.path.exists(blacklistPath):\n                self.log.write('Remove : ' + blacklistPath, 'nvidia.removeNvidia', 'debug')\n                os.remove(blacklistPath)\n\n            # Rename xorg.conf\n            xorg = '/etc/X11/xorg.conf'\n            if os.path.exists(xorg):\n                self.log.write('Rename : ' + xorg + ' -> ' + xorg + '.ddm.bak', 'nvidia.removeNvidia', 'debug')\n                os.rename(xorg, xorg + '.ddm.bak')\n\n            self.log.write('Done removing Nvidia drivers', 'nvidia.removeNvidia', 'info')\n\n        except Exception, detail:\n            self.log.write(detail, 'nvidia.removeNvidia', 'exception')\n\n    def preseedNvidiaPackages(self, action):\n        if self.distribution == 'debian':\n            # Run on configured system and debconf-utils installed:\n            # debconf-get-selections | grep nvidia > debconf-nvidia.seed\n            # replace tabs with spaces and change the default answers (note=space, boolean=true or false)\n            debConfList = []\n            debConfList.append('nvidia-support nvidia-support/warn-nouveau-module-loaded note ')\n            debConfList.append('nvidia-support nvidia-support/check-xorg-conf-on-removal boolean false')\n            debConfList.append('nvidia-support nvidia-support/check-running-module-version boolean true')\n            debConfList.append('nvidia-installer-cleanup nvidia-installer-cleanup/delete-nvidia-installer boolean true')\n            debConfList.append('nvidia-installer-cleanup nvidia-installer-cleanup/remove-conflicting-libraries boolean true')\n            debConfList.append('nvidia-support nvidia-support/removed-but-enabled-in-xorg-conf 
note ')\n debConfList.append('nvidia-support nvidia-support/warn-mismatching-module-version note ')\n debConfList.append('nvidia-support nvidia-support/last-mismatching-module-version string 302.17')\n debConfList.append('nvidia-support nvidia-support/needs-xorg-conf-to-enable note ')\n debConfList.append('nvidia-support nvidia-support/create-nvidia-conf boolean true')\n debConfList.append('nvidia-installer-cleanup nvidia-installer-cleanup/uninstall-nvidia-installer boolean true')\n\n # Add each line to the debconf database\n for line in debConfList:\n os.system('echo \"' + line + '\" | debconf-set-selections')\n\n # Install or remove the packages\n self.ec.run('apt-get -y --force-yes ' + action + ' nvidia-support nvidia-installer-cleanup')\n" }, { "alpha_fraction": 0.796407163143158, "alphanum_fraction": 0.802395224571228, "avg_line_length": 36.22222137451172, "blob_id": "d87d492ee5d2144f61e8c13c50eef810b2606e40", "content_id": "1e689cbbac4974a530ee2ed64bb299bd2510ed17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 334, "license_type": "no_license", "max_line_length": 96, "num_lines": 9, "path": "/README.md", "repo_name": "linuxer9/device-driver-manager", "src_encoding": "UTF-8", "text": "Third-party driver manager.\n\nCurrently Nvidia, ATI and Broadcom are supported.\nIt also checks whether or not the PAE kernel can be installed on multi-processor 32-bit systems.\n\nDDM uses the repositories to download and install the appropriate packages.\n\nDebug run: device-driver-manager -d\nThis will generate a log file: $HOME/ddm.log" }, { "alpha_fraction": 0.5729590654373169, "alphanum_fraction": 0.5819584131240845, "avg_line_length": 35.17829513549805, "blob_id": "32e6ea8e146f3f5467297f5e5cda737bb7bce127", "content_id": "6e9964d5e0e61c32ffdcb47f0d04ec906f9a3ceb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4667, "license_type": "no_license", "max_line_length": 84, "num_lines": 129, "path": "/usr/lib/device-driver-manager/drivers.py", "repo_name": "linuxer9/device-driver-manager", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport threading\nimport functions\nfrom mirror import Mirror\nfrom nvidia import Nvidia\nfrom ati import ATI\nfrom broadcom import Broadcom\nfrom pae import PAE\n\npackageStatus = ['installed', 'notinstalled', 'uninstallable']\nhwCodes = ['nvidia', 'ati', 'broadcom', 'pae', 'mirror']\n\n\n# Class to check for supported drivers\nclass DriverCheck():\n def __init__(self, loggerObject):\n self.log = loggerObject\n self.log.write('Initialize DriverCheck', 'drivers.DriverCheck', 'debug')\n self.distribution = functions.getDistribution()\n\n # This will only check for Nvidia, ATI, Broadcom and PAE\n def run(self):\n hwList = []\n\n #mir = Mirror(self.distribution, self.log)\n nv = Nvidia(self.distribution, self.log)\n ati = ATI(self.distribution, self.log)\n bc = Broadcom(self.distribution, self.log)\n pae = PAE(self.distribution, self.log)\n\n # Collect supported hardware\n #mirror = mir.getFastestMirror()\n hwNvidia = nv.getNvidia()\n hwATI = ati.getATI()\n hwBroadcom = bc.getBroadcom()\n hwPae = pae.getPae()\n\n # Combine all found hardware in a single list\n #for line in mirror:\n # hwList.append(line)\n for line in hwPae:\n hwList.append(line)\n for line in hwNvidia:\n hwList.append(line)\n for line in hwATI:\n hwList.append(line)\n for line in hwBroadcom:\n hwList.append(line)\n\n return hwList\n\n\n# Driver install class needs 
threading\nclass DriverInstall(threading.Thread):\n    def __init__(self, hwCodesWithStatusList, loggerObject):\n        threading.Thread.__init__(self)\n        self.log = loggerObject\n        self.log.write('Initialize DriverInstall', 'drivers.DriverInstall', 'debug')\n        self.hwCodesWithStatusList = hwCodesWithStatusList\n        self.distribution = functions.getDistribution()\n\n    # Install hardware drivers for given hardware codes (hwCodes)\n    def run(self):\n        # Instantiate driver classes\n        mir = Mirror(self.distribution, self.log)\n        nv = Nvidia(self.distribution, self.log)\n        ati = ATI(self.distribution, self.log)\n        bc = Broadcom(self.distribution, self.log)\n        pae = PAE(self.distribution, self.log)\n\n        # First check for mirror\n        for code in self.hwCodesWithStatusList:\n            if code[0] == hwCodes[4]:\n                if code[1] != packageStatus[2]:\n                    mir.installMirror()\n\n        # Now install the hardware drivers\n        for code in self.hwCodesWithStatusList:\n            # The mirror was handled above: skip it here\n            if code[0] != hwCodes[4]:\n                if code[0] == hwCodes[0]:\n                    if code[1] != packageStatus[2]:\n                        nv.installNvidia()\n                elif code[0] == hwCodes[1]:\n                    if code[1] != packageStatus[2]:\n                        ati.installATI()\n                elif code[0] == hwCodes[2]:\n                    if code[1] != packageStatus[2]:\n                        bc.installBroadcom()\n                elif code[0] == hwCodes[3]:\n                    if code[1] != packageStatus[2]:\n                        pae.installPAE()\n\n\n# Driver remove class needs threading\nclass DriverRemove(threading.Thread):\n    def __init__(self, hwCodesWithStatusList, loggerObject):\n        threading.Thread.__init__(self)\n        self.log = loggerObject\n        self.log.write('Initialize DriverRemove', 'drivers.DriverRemove', 'debug')\n        self.hwCodesWithStatusList = hwCodesWithStatusList\n        self.distribution = functions.getDistribution()\n\n    # Remove hardware drivers for given hardware codes (hwCodes)\n    def run(self):\n        # Instantiate driver classes\n        mir = Mirror(self.distribution, self.log)\n        nv = Nvidia(self.distribution, self.log)\n        ati = ATI(self.distribution, self.log)\n        bc = Broadcom(self.distribution, self.log)\n        pae = PAE(self.distribution, self.log)\n\n        for code in self.hwCodesWithStatusList:\n            if code[0] == hwCodes[0]:\n                if code[1] == packageStatus[0]:\n                    nv.removeNvidia()\n            elif code[0] == hwCodes[1]:\n                if code[1] == packageStatus[0]:\n                    ati.removeATI()\n            elif code[0] == hwCodes[2]:\n                if code[1] == packageStatus[0]:\n                    bc.removeBroadcom()\n            elif code[0] == hwCodes[3]:\n                if code[1] == packageStatus[0]:\n                    pae.removePAE()\n            if code[0] == hwCodes[4]:\n                if code[1] != packageStatus[2]:\n                    mir.removeMirror()\n" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7481481432914734, "avg_line_length": 43.66666793823242, "blob_id": "dd42868c7d5f369768dcb6d40800d64bb592aee3", "content_id": "71019f4b1179b21d4f79be1632f188bcfb82616c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 135, "license_type": "no_license", "max_line_length": 64, "num_lines": 3, "path": "/usr/bin/device-driver-manager", "repo_name": "linuxer9/device-driver-manager", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# Launch device-driver-manager with all passed arguments\n/usr/bin/python2.7 -tt /usr/lib/device-driver-manager/main.py $*\n\n" }, { "alpha_fraction": 0.5782670974731445, "alphanum_fraction": 0.581936776638031, "avg_line_length": 34.389610290527344, "blob_id": "f3868064bce0206a6476fbc9afa2bfd9bc2291d5", "content_id": "3d63ac81ec46f511986d6063c161deb9939b50e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24525, "license_type": "no_license", "max_line_length": 
181, "num_lines": 693, "path": "/usr/lib/device-driver-manager/functions.py", "repo_name": "linuxer9/device-driver-manager", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python -u\n\nimport os\nimport sys\nimport re\nimport operator\nimport string\nfrom execcmd import ExecCmd\ntry:\n import gtk\nexcept Exception, detail:\n print detail\n sys.exit(1)\n\navlThemesSearchstr = 'plymouth-themes'\npackageStatus = ['installed', 'notinstalled', 'uninstallable']\ngraphicsCard = None\n\n# Logging object set from parent\nlog = object\n\n# General ================================================\n\n\ndef repaintGui():\n # Force repaint: ugly, but gui gets repainted so fast that gtk objects don't show it\n while gtk.events_pending():\n gtk.main_iteration(False)\n\n\n# Return the type string of a object\ndef getTypeString(object):\n tpString = ''\n tp = str(type(object))\n matchObj = re.search(\"'(.*)'\", tp)\n if matchObj:\n tpString = matchObj.group(1)\n return tpString\n\n\n# Convert string to number\ndef strToNumber(string, toInt=False):\n nr = 0\n try:\n if toInt:\n nr = int(string)\n else:\n nr = float(string)\n except ValueError:\n nr = 0\n return nr\n\n\n# Check if parameter is a list\ndef isList(lst):\n return isinstance(lst, list)\n\n\n# Check if parameter is a list containing lists\ndef isListOfLists(lst):\n return len(lst) == len([x for x in lst if isList(x)])\n\n\n# Sort list on given column\ndef sortListOnColumn(lst, columsList):\n for col in reversed(columsList):\n lst = sorted(lst, key=operator.itemgetter(col))\n return lst\n\n\n# Return a list with images from a given path\ndef getImgsFromDir(directoryPath):\n extensions = ['.png', '.jpg', '.jpeg', '.gif']\n log.write('Search for extensions: ' + str(extensions), 'functions.getImgsFromDir', 'debug')\n files = os.listdir(directoryPath)\n img = []\n for file in files:\n for ext in extensions:\n if os.path.splitext(file)[1] == ext:\n path = os.path.join(directoryPath, file)\n img.append(path)\n log.write('Image found: ' + path, 'functions.getImgsFromDir', 'debug')\n break\n return img\n\n\n# TreeView ==============================================\n\n# Clear treeview\ndef clearTreeView(treeview):\n liststore = treeview.get_model()\n if liststore is not None:\n liststore.clear()\n treeview.set_model(liststore)\n\n\n# General function to fill a treeview\n# Set setCursorWeight to 400 if you don't want bold font\ndef fillTreeview(treeview, contentList, columnTypesList, columnHideList=[-1], setCursor=0, setCursorWeight=400, firstItemIsColName=False, appendToExisting=False, appendToTop=False):\n # Check if this is a multi-dimensional array\n multiCols = isListOfLists(contentList)\n colNameList = []\n\n if len(contentList) > 0:\n liststore = treeview.get_model()\n if liststore is None:\n # Dirty but need to dynamically create a list store\n dynListStore = 'gtk.ListStore('\n for i in range(len(columnTypesList)):\n dynListStore += str(columnTypesList[i]) + ', '\n dynListStore += 'int)'\n log.write('Create list store eval string: ' + dynListStore, 'functions.fillTreeview', 'debug')\n liststore = eval(dynListStore)\n else:\n if not appendToExisting:\n # Existing list store: clear all rows\n log.write('Clear existing list store', 'functions.fillTreeview', 'debug')\n liststore.clear()\n\n # Create list with column names\n if multiCols:\n for i in range(len(contentList[0])):\n if firstItemIsColName:\n log.write('First item is column name (multi-column list): ' + contentList[0][i], 'functions.fillTreeview', 'debug')\n 
colNameList.append(contentList[0][i])\n else:\n colNameList.append('Column ' + str(i))\n else:\n if firstItemIsColName:\n log.write('First item is column name (single-column list): ' + contentList[0][i], 'functions.fillTreeview', 'debug')\n colNameList.append(contentList[0])\n else:\n colNameList.append('Column 0')\n\n log.write('Create column names: ' + str(colNameList), 'functions.fillTreeview', 'debug')\n\n # Add data to the list store\n for i in range(len(contentList)):\n # Skip first row if that is a column name\n skip = False\n if firstItemIsColName and i == 0:\n log.write('First item is column name: skip first item', 'functions.fillTreeview', 'debug')\n skip = True\n\n if not skip:\n w = 400\n if i == setCursor:\n w = setCursorWeight\n if multiCols:\n # Dynamically add data for multi-column list store\n if appendToTop:\n dynListStoreAppend = 'liststore.insert(0, ['\n else:\n dynListStoreAppend = 'liststore.append( ['\n for j in range(len(contentList[i])):\n val = str(contentList[i][j])\n if str(columnTypesList[j]) == 'str':\n val = '\"' + val + '\"'\n if str(columnTypesList[j]) == 'gtk.gdk.Pixbuf':\n val = 'gtk.gdk.pixbuf_new_from_file(\"' + val + '\")'\n dynListStoreAppend += val + ', '\n dynListStoreAppend += str(w) + '] )'\n\n log.write('Add data to list store (single-column list): ' + dynListStoreAppend, 'functions.fillTreeview', 'debug')\n eval(dynListStoreAppend)\n else:\n if appendToTop:\n log.write('Add data to top of list store (single-column list): ' + str(contentList[i]), 'functions.fillTreeview', 'debug')\n liststore.insert(0, [contentList[i], w])\n else:\n log.write('Add data to bottom of list store (single-column list): ' + str(contentList[i]), 'functions.fillTreeview', 'debug')\n liststore.append([contentList[i], w])\n\n # Check last visible column\n lastVisCol = -1\n for i in xrange(len(colNameList), 0, -1):\n if i in columnHideList:\n lastVisCol = i - 1\n log.write('Last visible column nr: ' + str(lastVisCol), 'functions.fillTreeview', 'debug')\n break\n\n # Create columns\n for i in range(len(colNameList)):\n # Check if we have to hide this column\n skip = False\n for colNr in columnHideList:\n if colNr == i:\n log.write('Hide column nr: ' + str(colNr), 'functions.fillTreeview', 'debug')\n skip = True\n\n if not skip:\n # Create a column only if it does not exist\n colFound = ''\n cols = treeview.get_columns()\n for col in cols:\n if col.get_title() == colNameList[i]:\n colFound = col.get_title()\n break\n\n if colFound == '':\n # Build renderer and attributes to define the column\n # Possible attributes for text: text, foreground, background, weight\n attr = ', text=' + str(i) + ', weight=' + str(len(colNameList))\n renderer = 'gtk.CellRendererText()' # an object that renders text into a gtk.TreeView cell\n if str(columnTypesList[i]) == 'bool':\n renderer = 'gtk.CellRendererToggle()' # an object that renders a toggle button into a TreeView cell\n attr = ', active=' + str(i)\n if str(columnTypesList[i]) == 'gtk.gdk.Pixbuf':\n renderer = 'gtk.CellRendererPixbuf()' # an object that renders a pixbuf into a gtk.TreeView cell\n attr = ', pixbuf=' + str(i)\n dynCol = 'gtk.TreeViewColumn(\"' + str(colNameList[i]) + '\", ' + renderer + attr + ')'\n\n log.write('Create column: ' + dynCol, 'functions.fillTreeview', 'debug')\n col = eval(dynCol)\n\n # Get the renderer of the column and add type specific properties\n rend = col.get_cell_renderers()[0]\n #if str(columnTypesList[i]) == 'str':\n # TODO: Right align text in column - add parameter to function\n 
#rend.set_property('xalign', 1.0)\n if str(columnTypesList[i]) == 'bool':\n # If checkbox column, add toggle function\n log.write('Check box found: add toggle function', 'functions.fillTreeview', 'debug')\n rend.connect('toggled', tvchk_on_toggle, liststore, i)\n\n # Let the last colum fill the treeview\n if i == lastVisCol:\n log.write('Last column fills treeview: ' + str(lastVisCol), 'functions.fillTreeview', 'debug')\n col.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)\n\n # Finally add the column\n treeview.append_column(col)\n log.write('Column added: ' + col.get_title(), 'functions.fillTreeview', 'debug')\n else:\n log.write('Column already exists: ' + colFound, 'functions.fillTreeview', 'debug')\n\n # Add liststore, set cursor and set the headers\n treeview.set_model(liststore)\n treeview.set_cursor(setCursor)\n treeview.set_headers_visible(firstItemIsColName)\n log.write('Add Liststrore to Treeview', 'functions.fillTreeview', 'debug')\n\n # Scroll to selected cursor\n selection = treeview.get_selection()\n tm, treeIter = selection.get_selected()\n path = tm.get_path(treeIter)\n treeview.scroll_to_cell(path)\n log.write('Scrolled to selected row: ' + str(setCursor), 'functions.fillTreeview', 'debug')\n\n\ndef tvchk_on_toggle(cell, path, liststore, colNr, *ignore):\n if path is not None:\n it = liststore.get_iter(path)\n liststore[it][colNr] = not liststore[it][colNr]\n\n\n# Get the selected value in a treeview\ndef getSelectedValue(treeView, colNr=0):\n # Assume single row selection\n (model, pathlist) = treeView.get_selection().get_selected_rows()\n return model.get_value(model.get_iter(pathlist[0]), colNr)\n\n\n# Return all the values in a given column\ndef getColumnValues(treeView, colNr=0):\n cv = []\n model = treeView.get_model()\n itr = model.get_iter_first()\n while itr is not None:\n cv.append(model.get_value(itr, colNr))\n itr = model.iter_next(itr)\n return cv\n\n\n# Deselect all drivers, except PAE\ndef treeviewToggleAll(treeView, toggleColNr, toggleValue=False, excludeColNr=-1, excludeValue=''):\n model = treeView.get_model()\n itr = model.get_iter_first()\n while itr is not None:\n if excludeColNr >= 0:\n exclVal = model.get_value(itr, excludeColNr)\n if exclVal != excludeValue:\n model[itr][toggleColNr] = toggleValue\n else:\n model[itr][toggleColNr] = toggleValue\n itr = model.iter_next(itr)\n\n\n# Statusbar =====================================================\n\ndef pushMessage(statusbar, message, contextString='message'):\n context = statusbar.get_context_id(contextString)\n statusbar.push(context, message)\n\n\ndef popMessage(statusbar, contextString='message'):\n context = statusbar.get_context_id(contextString)\n statusbar.pop(context)\n\n\n# System ========================================================\n\n# Get linux-headers and linux-image package names\n# If getLatest is set to True, the latest version of the packages is returned rather than the packages for the currently booted kernel.\n# includeLatestRegExp is a regular expression that must be part of the package name (in conjuction with getLatest=True).\n# excludeLatestRegExp is a regular expression that must NOT be part of the package name (in conjuction with getLatest=True).\ndef getLinuxHeadersAndImage(getLatest=False, includeLatestRegExp='', excludeLatestRegExp=''):\n returnList = []\n lhList = []\n ec = ExecCmd(log)\n if getLatest:\n lst = ec.run('aptitude search linux-headers', False)\n for item in lst:\n lhMatch = re.search('linux-headers-\\d+\\.[a-zA-Z0-9-\\.]*', item)\n if lhMatch:\n lh = 
lhMatch.group(0)\n addLh = True\n if includeLatestRegExp != '':\n inclMatch = re.search(includeLatestRegExp, lh)\n if not inclMatch:\n addLh = False\n if excludeLatestRegExp != '':\n exclMatch = re.search(excludeLatestRegExp, lh)\n if exclMatch:\n addLh = False\n\n # Append to list\n if addLh:\n lhList.append(lh)\n else:\n # Get the current linux header package\n linHeader = ec.run(\"echo linux-headers-$(uname -r)\", False)\n lhList.append(linHeader[0])\n\n # Sort the list and add the linux-image package name\n if lhList:\n lhList.sort(reverse=True)\n returnList.append(lhList[0])\n returnList.append('linux-image-' + lhList[0][14:])\n return returnList\n\n\n# Get the system's graphic card\ndef getGraphicsCard():\n global graphicsCard\n if graphicsCard is None:\n cmdGraph = 'lspci | grep VGA'\n ec = ExecCmd(log)\n hwGraph = ec.run(cmdGraph, False)\n for line in hwGraph:\n graphicsCard = line[line.find(': ') + 2:]\n break\n return graphicsCard\n\n\ndef getGraphicsCardManufacturerPciId():\n pciId = []\n cmdGraph = 'lspci -nn | grep VGA'\n ec = ExecCmd(log)\n hwGraph = ec.run(cmdGraph, False)\n if hwGraph:\n idMatch = re.search('\\[(\\w*):(\\w*)\\]', hwGraph[0])\n if idMatch:\n pciId.append(idMatch.group(1))\n pciId.append(idMatch.group(2))\n return pciId\n\n\n# Get system version information\ndef getSystemVersionInfo():\n info = ''\n try:\n ec = ExecCmd(log)\n infoList = ec.run('cat /proc/version', False)\n if infoList:\n info = infoList[0]\n except Exception, detail:\n log.write(detail, 'functions.getSystemVersionInfo', 'error')\n return info\n\n\n# Get the system's distribution\ndef getDistribution():\n distribution = ''\n sysInfo = getSystemVersionInfo().lower()\n if 'debian' in sysInfo:\n distribution = 'debian'\n elif 'ubuntu' in sysInfo:\n distribution = 'ubuntu'\n return distribution\n\n\n# Get the system's distribution\ndef getDistributionDescription():\n distribution = ''\n try:\n cmdDist = 'cat /etc/*-release | grep DISTRIB_DESCRIPTION'\n ec = ExecCmd(log)\n dist = ec.run(cmdDist, False)[0]\n distribution = dist[dist.find('=') + 1:]\n distribution = string.replace(distribution, '\"', '')\n except Exception, detail:\n log.write(detail, 'functions.getDistributionDescription', 'error')\n return distribution\n\n\n# Get the system's distribution\ndef getDistributionReleaseNumber():\n release = 0\n try:\n cmdRel = 'cat /etc/*-release | grep DISTRIB_RELEASE'\n ec = ExecCmd(log)\n relLst = ec.run(cmdRel, False)\n if relLst:\n rel = relLst[0]\n release = rel[rel.find('=') + 1:]\n release = string.replace(release, '\"', '')\n release = strToNumber(release)\n except Exception, detail:\n log.write(detail, 'functions.getDistributionReleaseNumber', 'error')\n return release\n\n\n# Get the system's desktop\ndef getDesktopEnvironment():\n desktop = os.environ.get('DESKTOP_SESSION')\n if desktop is None or desktop == 'default':\n # Dirty: KDE_FULL_SESSION does not always exist: also check if kdm exists\n if 'KDE_FULL_SESSION' in os.environ or os.path.isfile('/usr/bin/kdm'):\n desktop = 'kde'\n elif 'GNOME_DESKTOP_SESSION_ID' in os.environ or 'XDG_CURRENT_DESKTOP' in os.environ:\n desktop = 'gnome'\n elif 'MATE_DESKTOP_SESSION_ID' in os.environ:\n desktop = 'mate'\n return desktop\n\n\n# Get valid screen resolutions\ndef getResolutions(minRes='', maxRes='', reverseOrder=False):\n cmd = 'xrandr'\n ec = ExecCmd(log)\n cmdList = ec.run(cmd, False)\n avlRes = []\n avlResTmp = []\n minW = 0\n minH = 0\n maxW = 0\n maxH = 0\n\n # Split the minimum and maximum resolutions\n if 'x' in minRes:\n minResList = 
minRes.split('x')\n minW = strToNumber(minResList[0], True)\n minH = strToNumber(minResList[1], True)\n if 'x' in maxRes:\n maxResList = maxRes.split('x')\n maxW = strToNumber(maxResList[0], True)\n maxH = strToNumber(maxResList[1], True)\n\n # Fill the list with screen resolutions\n for line in cmdList:\n for item in line.split():\n if item and 'x' in item and len(item) > 2 and not '+' in item and not 'axis' in item and not 'maximum' in item:\n log.write('Resolution found: ' + item, 'functions.getResolutions', 'debug')\n itemList = item.split('x')\n itemW = strToNumber(itemList[0], True)\n itemH = strToNumber(itemList[1], True)\n # Check if it can be added\n if itemW >= minW and itemH >= minH and (maxW == 0 or itemW <= maxW) and (maxH == 0 or itemH <= maxH):\n log.write('Resolution added: ' + item, 'functions.getResolutions', 'debug')\n avlResTmp.append([itemW, itemH])\n\n # Sort the list and return as readable resolution strings\n avlResTmp.sort(key=operator.itemgetter(0), reverse=reverseOrder)\n for res in avlResTmp:\n avlRes.append(str(res[0]) + 'x' + str(res[1]))\n return avlRes\n\n\n# Get current Plymouth resolution\ndef getCurrentResolution():\n res = ''\n boot = getBoot()\n path = os.path.join('/etc/default', boot)\n regExp = 'mode_option=(.*)-'\n\n if os.path.isfile(path):\n grubfile = open(path, 'r')\n text = grubfile.read()\n grubfile.close()\n # Search text for resolution\n matchObj = re.search(regExp, text)\n if matchObj:\n res = matchObj.group(1)\n log.write('Current Plymouth resolution: ' + res, 'functions.getCurrentResolution', 'debug')\n else:\n log.write('Neither grub nor burg found in /etc/default', 'functions.getCurrentResolution', 'error')\n\n return res\n\n\n# Get the bootloader\ndef getBoot():\n grubPath = '/etc/default/grub'\n burgPath = '/etc/default/burg'\n if os.path.isfile(grubPath): # Grub\n return 'grub'\n elif os.path.isfile(burgPath): # Burg\n return 'burg'\n else:\n return ''\n\n\n# Check the status of a package\ndef getPackageStatus(packageName):\n status = ''\n try:\n cmdChk = 'apt-cache policy ' + str(packageName)\n ec = ExecCmd(log)\n packageCheck = ec.run(cmdChk, False)\n\n for line in packageCheck:\n instChk = re.search('installed:.*\\d.*', line.lower())\n if not instChk:\n instChk = re.search('installed.*', line.lower())\n if instChk:\n # Package is not installed\n log.write('Package not installed: ' + str(packageName), 'drivers.getPackageStatus', 'debug')\n status = packageStatus[1]\n break\n else:\n # Package is installed\n log.write('Package is installed: ' + str(packageName), 'drivers.getPackageStatus', 'debug')\n status = packageStatus[0]\n break\n # Package is not found: uninstallable\n if not status:\n log.write('Package not found: ' + str(packageName), 'drivers.getPackageStatus', 'warning')\n status = packageStatus[2]\n except:\n # If something went wrong: assume that package is uninstallable\n log.write('Could not get status info for package: ' + str(packageName), 'drivers.getPackageStatus', 'error')\n status = packageStatus[2]\n\n return status\n\n\n# Check if a package is installed\ndef isPackageInstalled(packageName):\n isInstalled = False\n cmd = 'aptitude search ' + packageName + ' | grep ^i'\n ec = ExecCmd(log)\n packageList = ec.run(cmd, False)\n if packageList:\n if len(packageList) > 0:\n isInstalled = True\n return isInstalled\n\n\n# List all dependencies of a package\ndef getPackageDependencies(packageName):\n retList = []\n cmd = 'apt-cache depends ' + packageName + ' | grep Depends'\n ec = ExecCmd(log)\n depList = ec.run(cmd, 
False)\n if depList:\n for line in depList:\n matchObj = re.search(':\\s(.*)', line)\n if matchObj:\n retList.append(matchObj.group(1))\n return retList\n\n\n# Check if a process is running\ndef isProcessRunning(processName):\n isProc = False\n cmd = 'ps -C ' + processName\n ec = ExecCmd(log)\n procList = ec.run(cmd, False)\n if procList:\n if len(procList) > 1:\n isProc = True\n return isProc\n\n\n# Get the package version number\ndef getPackageVersion(packageName):\n version = ''\n cmd = 'apt-cache policy ' + packageName + ' | grep Installed'\n ec = ExecCmd(log)\n versionList = ec.run(cmd, False)\n\n for line in versionList:\n versionObj = re.search(':\\s(.*)', line.lower())\n if versionObj:\n version = versionObj.group(1)\n return version\n\n\n# Check if system has wireless (not necessarily a wireless connection)\ndef hasWireless():\n wl = False\n cmd = 'iwconfig | grep \"Access Point\"'\n ec = ExecCmd(log)\n wlList = ec.run(cmd, False)\n if wlList:\n for line in wlList:\n if 'Access Point' in line:\n wl = True\n break\n return wl\n\n\n# Check if we're running live\ndef isRunningLive():\n live = False\n # Debian live mount directory\n dirLive = '/live'\n # Ubuntu live mount directory\n dirUbiquity = '/rofs'\n if os.path.exists(dirLive) or os.path.exists(dirUbiquity):\n live = True\n return live\n\n\n# Plymouth =============================================\n\n# Get a list of installed Plymouth themes\ndef getInstalledThemes():\n cmd = '/usr/sbin/plymouth-set-default-theme --list'\n ec = ExecCmd(log)\n instThemes = ec.run(cmd, False)\n return instThemes\n\n\n# Get the currently used Plymouth theme\ndef getCurrentTheme():\n curTheme = ['']\n if getCurrentResolution() != '':\n cmd = '/usr/sbin/plymouth-set-default-theme'\n ec = ExecCmd(log)\n curTheme = ec.run(cmd, False)\n return curTheme[0]\n\n\n# Get a list of Plymouth themes in the repositories that can be installed\ndef getAvailableThemes():\n cmd = 'aptitude search ' + avlThemesSearchstr + ' | grep ^p'\n ec = ExecCmd(log)\n availableThemes = ec.run(cmd)\n avlThemes = []\n\n for line in availableThemes:\n matchObj = re.search('plymouth-themes-([a-zA-Z0-9-]*)', line)\n if matchObj:\n theme = matchObj.group(1)\n if not 'all' in theme:\n avlThemes.append(theme)\n\n return avlThemes\n\n\ndef previewPlymouth():\n cmd = \"su -c 'plymouthd; plymouth --show-splash ; for ((I=0; I<10; I++)); do plymouth --update=test$I ; sleep 1; done; plymouth quit'\"\n log.write('Preview command: ' + cmd, 'drivers.previewPlymouth', 'debug')\n try:\n ec = ExecCmd(log)\n ec.run(cmd, False)\n except Exception, detail:\n log.write(detail, 'drivers.previewPlymouth', 'error')\n\n\n# Get the package name that can be uninstalled of a given Plymouth theme\ndef getRemovablePackageName(theme):\n cmd = 'dpkg -S ' + theme + '.plymouth'\n log.write('Search package command: ' + cmd, 'drivers.getRemovablePackageName', 'debug')\n package = ''\n ec = ExecCmd(log)\n packageNames = ec.run(cmd, False)\n\n for line in packageNames:\n if avlThemesSearchstr in line:\n matchObj = re.search('(^.*):', line)\n if matchObj:\n package = matchObj.group(1)\n break\n log.write('Package found ' + package, 'drivers.getRemovablePackageName', 'debug')\n return package\n\n\n# Get valid package name of a Plymouth theme (does not have to exist in the repositories)\ndef getPackageName(theme):\n return avlThemesSearchstr + \"-\" + theme\n" } ]
12
HoodieJ4D/Python-Number-Guessing
https://github.com/HoodieJ4D/Python-Number-Guessing
9739be0200713ac23bf19c36e14b5230c58e3c3d
1349bdbfe8fbe591d04b64365d9c0ac60001248f
5f97693447c12b7b9961d5b185d487f1ec648b5a
refs/heads/master
2023-05-13T21:25:03.878428
2021-06-07T19:39:42
2021-06-07T19:39:42
374,747,543
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5481012463569641, "alphanum_fraction": 0.5651898980140686, "avg_line_length": 25.79660987854004, "blob_id": "815665c7b30d6e9dc584119ea3ce89e4ba31fe5e", "content_id": "f016893b1d751d0c0ff4cb346d1835f0ac1809d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1580, "license_type": "no_license", "max_line_length": 108, "num_lines": 59, "path": "/Number-Guessing/Python_Number_Guessing.py", "repo_name": "HoodieJ4D/Python-Number-Guessing", "src_encoding": "UTF-8", "text": "import random\nimport time\n\ndef guessing():\n time.sleep(2)\n b = int(input(\"What is your first guess? \"))\n if b == x:\n time.sleep(1)\n print(\"Congratulations! You got the number!\")\n time.sleep(3)\n quit()\n elif b < x:\n time.sleep(1)\n print(\"The number is higher than your guess, try again\")\n elif b > x:\n time.sleep(1)\n print(\"Your number is lower than your guess, try again\")\n time.sleep(1)\n b = int(input(\"What is your second guess? \"))\n if b == x:\n time.sleep(1)\n print(\"Congratulations! You got the number!\")\n time.sleep(3)\n quit()\n else:\n time.sleep(1)\n print(\"The number chosen is an \" + c + \" number\")\n time.sleep(1)\n print(\"You have one more guess!\")\n b = int(input(\"What is your final guess? \"))\n if b == x:\n time.sleep(1)\n print(\"Congratulations! You got the number!\")\n time.sleep(3)\n quit()\n else:\n time.sleep(1)\n print(\"Sorry, you couldn't guess the number.\")\n time.sleep(1)\n print(\"The number was \" + str(x))\n time.sleep(2)\n print(\"Thanks for playing!\")\n time.sleep(3)\n quit()\n\nprint(\"Hello there\")\ntime.sleep(3)\nprint(\"Today we're going to play a game\")\ntime.sleep(3)\nprint(\"The game is this, I'll choose a random number between 0 and 20, and you'll have to guess the number\")\ntime.sleep(3)\nx = random.randint(0,20)\nc = x % 2\nif c == 0:\n c = \"even\"\nelse:\n c = \"odd\"\nprint(\"The number is chosen, you can begin guessing.\")\nguessing();" } ]
1
joshimbriani/criticalHit
https://github.com/joshimbriani/criticalHit
2e21f6f9cae6e0a7b41898fae6f7822cf18ac3c0
9da2d2fa4a2b3e6f343c3915bf4bebf1d5eafbd5
669a8871cd9907f626b7f7c79065e5bfe5381ee0
refs/heads/master
2021-01-20T14:03:02.383035
2017-05-08T01:56:15
2017-05-08T01:56:15
90,553,646
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4964539110660553, "alphanum_fraction": 0.5106382966041565, "avg_line_length": 22.33333396911621, "blob_id": "0ac9fb6227f2c74adc2a9cfcb9eb7481bb76ad89", "content_id": "2b5ba2167e6c6f7c44147f2ed884aeb5e1161527", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 282, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/criticalHit/board.py", "repo_name": "joshimbriani/criticalHit", "src_encoding": "UTF-8", "text": "BOARD_WIDTH = 18\nBOARD_HEIGHT = 24\n\nclass Board:\n\n def __init__(self):\n self.board = []\n for i in range(BOARD_HEIGHT):\n boardRow = []\n for j in range(BOARD_WIDTH):\n boardRow.append('C')\n self.board.append(boardRow)\n\n\n" }, { "alpha_fraction": 0.5306122303009033, "alphanum_fraction": 0.5306122303009033, "avg_line_length": 18.600000381469727, "blob_id": "984a9d845f3e3b53d85cf0617536f0de22dfd78d", "content_id": "3404fef890353fc171ed7e91d1f2aab5411a1148", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 98, "license_type": "no_license", "max_line_length": 29, "num_lines": 5, "path": "/criticalHit/piece.py", "repo_name": "joshimbriani/criticalHit", "src_encoding": "UTF-8", "text": "class Piece:\n\n def __init__(self, name):\n self.name = name\n print(\"Piece class\")\n" }, { "alpha_fraction": 0.6857143044471741, "alphanum_fraction": 0.6857143044471741, "avg_line_length": 18.090909957885742, "blob_id": "3c7ce84acea50f0ee3dcd14ccb6ff540807a3cea", "content_id": "94fd27bdc8881b4c043d53f63f6181fa78d11909", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 210, "license_type": "no_license", "max_line_length": 58, "num_lines": 11, "path": "/criticalHit/pieceImporter.py", "repo_name": "joshimbriani/criticalHit", "src_encoding": "UTF-8", "text": "from piece import Piece\n\n'''\n Read in piece objects\n\n Takes no arguments and returns a list of piece objects\n'''\ndef getPieces():\n\n # Look in the default directory for pieces (aka data)\n with open(\n" }, { "alpha_fraction": 0.6190476417541504, "alphanum_fraction": 0.6190476417541504, "avg_line_length": 20, "blob_id": "8680c190b88d1d3a6b56e8b7de8d2c3bb5d4de6c", "content_id": "22568a57618524db51bd039f4dbe32da14e45b4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 84, "license_type": "no_license", "max_line_length": 37, "num_lines": 4, "path": "/criticalHit/boardImporter.py", "repo_name": "joshimbriani/criticalHit", "src_encoding": "UTF-8", "text": "class BoardImporter:\n\n def __init__(self):\n print(\"Board Importer class\")\n" } ]
4
pinaki-das-sage/assignments
https://github.com/pinaki-das-sage/assignments
47a6f8207f975799f3ca4c14db1da714bafc77b7
cb708b0a641eaa68e356edb60657ebb0750cc2ef
4ed62be1fb2f61315dded4a44df00ce9a33c5115
refs/heads/main
2023-06-14T17:45:29.022917
2021-07-09T19:24:03
2021-07-09T19:24:03
370,258,931
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7380073666572571, "alphanum_fraction": 0.8118081092834473, "avg_line_length": 9.037036895751953, "blob_id": "bc10fa2767879b07918bed3653a56be0470d19bb", "content_id": "b883e8a078ce189e6665289abef7733108f4e67b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 271, "license_type": "no_license", "max_line_length": 16, "num_lines": 27, "path": "/requirements.txt", "repo_name": "pinaki-das-sage/assignments", "src_encoding": "UTF-8", "text": "Flask~=2.0.0\npandas~=1.2.4\ngunicorn\nplotly~=4.14.3\ncertifi\nchardet\nClick\ndask\ndecorator\nFlask-Compress\nhtml5lib\nidna\nipython-genutils\nJinja2\njupyter-core\nnbformat\nnumpy~=1.20.3\npytz\nsix~=1.12.0\ntraitlets\nWerkzeug\nscikit-learn\nstatsmodels\ngraphviz\npydotplus\nIPython\npydot\n" }, { "alpha_fraction": 0.6241626143455505, "alphanum_fraction": 0.6347223520278931, "avg_line_length": 41.545894622802734, "blob_id": "2d952a39e81f4e525cd0c922dcc0d05d98785258", "content_id": "8937efd95d2963b5bade2d2af63be5123d5bd544", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8807, "license_type": "no_license", "max_line_length": 117, "num_lines": 207, "path": "/assignment9.py", "repo_name": "pinaki-das-sage/assignments", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom flask import render_template\nimport numpy as np\nimport plotly.express as px\nimport plotly\nimport json\n\nfrom sklearn.model_selection import train_test_split\nfrom customutils import CustomUtils\n\n\nclass Assignment9:\n @staticmethod\n def binary_map(x):\n return x.map({'Yes': 1, \"No\": 0})\n\n @staticmethod\n def process():\n churn_data = CustomUtils.read_file_and_return_df('9_churn_data.csv')\n customer_data = CustomUtils.read_file_and_return_df('9_customer_data.csv')\n internet_data = CustomUtils.read_file_and_return_df('9_internet_data.csv')\n\n # merge churn data with customer data\n df_1 = pd.merge(churn_data, customer_data, how='inner', on='customerID')\n\n # merge with internet usage data\n dataset = pd.merge(df_1, internet_data, how='inner', on='customerID')\n # dataset.isnull().sum()\n\n # dataset.head()\n # clean the data\n # dataset['TotalCharges'].describe()\n dataset['TotalCharges'] = dataset['TotalCharges'].replace(' ', np.nan)\n dataset['TotalCharges'] = pd.to_numeric(dataset['TotalCharges'])\n\n value = (dataset['TotalCharges'] / dataset['MonthlyCharges']).median() * dataset['MonthlyCharges']\n dataset['TotalCharges'] = value.where(dataset['TotalCharges'] == np.nan, other=dataset['TotalCharges'])\n # dataset['TotalCharges'].describe()\n\n varlist = ['PhoneService', 'PaperlessBilling', 'Churn', 'Partner', 'Dependents']\n dataset[varlist] = dataset[varlist].apply(Assignment9.binary_map)\n # dataset.head()\n\n # one hot encoding and merge\n dummy1 = pd.get_dummies(dataset[['Contract', 'PaymentMethod', 'gender', 'InternetService']], drop_first=True)\n dataset = pd.concat([dataset, dummy1], axis=1)\n # dataset.head()\n\n # Creating dummy variables for the variable 'MultipleLines'\n ml = pd.get_dummies(dataset['MultipleLines'], prefix='MultipleLines')\n # Dropping MultipleLines_No phone service column\n ml1 = ml.drop(['MultipleLines_No phone service'], 1)\n # Adding the results to the master dataframe\n dataset = pd.concat([dataset, ml1], axis=1)\n\n # Creating dummy variables for the variable 'OnlineSecurity'.\n os = pd.get_dummies(dataset['OnlineSecurity'], prefix='OnlineSecurity')\n os1 = os.drop(['OnlineSecurity_No internet 
service'], 1)\n # Adding the results to the master dataframe\n dataset = pd.concat([dataset, os1], axis=1)\n\n # Creating dummy variables for the variable 'OnlineBackup'.\n ob = pd.get_dummies(dataset['OnlineBackup'], prefix='OnlineBackup')\n ob1 = ob.drop(['OnlineBackup_No internet service'], 1)\n # Adding the results to the master dataframe\n dataset = pd.concat([dataset, ob1], axis=1)\n\n # Creating dummy variables for the variable 'DeviceProtection'.\n dp = pd.get_dummies(dataset['DeviceProtection'], prefix='DeviceProtection')\n dp1 = dp.drop(['DeviceProtection_No internet service'], 1)\n # Adding the results to the master dataframe\n dataset = pd.concat([dataset, dp1], axis=1)\n\n # Creating dummy variables for the variable 'TechSupport'.\n ts = pd.get_dummies(dataset['TechSupport'], prefix='TechSupport')\n ts1 = ts.drop(['TechSupport_No internet service'], 1)\n # Adding the results to the master dataframe\n dataset = pd.concat([dataset, ts1], axis=1)\n\n # Creating dummy variables for the variable 'StreamingTV'.\n st = pd.get_dummies(dataset['StreamingTV'], prefix='StreamingTV')\n st1 = st.drop(['StreamingTV_No internet service'], 1)\n # Adding the results to the master dataframe\n dataset = pd.concat([dataset, st1], axis=1)\n\n # Creating dummy variables for the variable 'StreamingMovies'.\n smd = pd.get_dummies(dataset['StreamingMovies'], prefix='StreamingMovies')\n smd.drop(['StreamingMovies_No internet service'], 1, inplace=True)\n # Adding the results to the master dataframe\n dataset = pd.concat([dataset, smd], axis=1)\n # dataset.head()\n\n # drop the columns for which dummies have been created\n dataset = dataset.drop(\n ['Contract', 'PaymentMethod', 'gender', 'MultipleLines', 'InternetService', 'OnlineSecurity',\n 'OnlineBackup', 'DeviceProtection',\n 'TechSupport', 'StreamingTV', 'StreamingMovies'], 1)\n # dataset.head()\n\n # outliers removal\n # num_telecom = dataset[['tenure', 'MonthlyCharges', 'SeniorCitizen', 'TotalCharges']]\n # num_telecom.describe(percentiles=[.25, .5, .75, .90, .95, .99])\n # dataset.isnull().sum()\n dataset = dataset[~np.isnan(dataset['TotalCharges'])]\n\n # define feature and target\n X = dataset.drop(['Churn', 'customerID'], axis=1)\n # X.head()\n\n y = dataset['Churn']\n # y.head()\n\n # Splitting the data into train and test\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, test_size=0.3, random_state=100)\n\n # Feature Scaling\n from sklearn.preprocessing import StandardScaler\n scaler = StandardScaler()\n\n X_train[['tenure', 'MonthlyCharges', 'TotalCharges']] = scaler.fit_transform(\n X_train[['tenure', 'MonthlyCharges', 'TotalCharges']])\n\n # X_train.head()\n\n # Model Building\n # Logistic regression model\n import statsmodels.api as sm\n logm1 = sm.GLM(y_train, (sm.add_constant(X_train)), family=sm.families.Binomial())\n logm1.fit().summary()\n\n # Feature Selection Using RFE\n from sklearn.linear_model import LogisticRegression\n logreg = LogisticRegression(max_iter=1000)\n logreg.fit(X_train, y_train)\n\n # display the coefficients as a dataframe\n feature_cols = X.columns\n coeffs = pd.DataFrame(list(zip(feature_cols, logreg.coef_[0])), columns=['feature', 'coef'])\n coeffs.set_index('feature', inplace=True)\n # coeffs.sort_values('coef', ascending=False).head(15)\n\n # create a bar chart out of it\n fig = px.bar(coeffs.sort_values('coef', ascending=False), height=600)\n\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\n # Adding a constant\n X_train_sm = 
sm.add_constant(X_train[feature_cols])\n logm2 = sm.GLM(y_train, X_train_sm, family=sm.families.Binomial())\n res = logm2.fit()\n res.summary()\n\n # Getting the predicted values on the train set\n y_train_pred = res.predict(X_train_sm)\n\n y_train_pred_final = pd.DataFrame({'Churn': y_train.values, 'Churn_Prob': y_train_pred})\n y_train_pred_final['CustID'] = y_train.index\n # y_train_pred_final.head()\n\n # Creating new column 'predicted' with 1 if Churn_Prob > 0.5 else 0\n y_train_pred_final['predicted'] = y_train_pred_final.Churn_Prob.map(lambda x: 1 if x > 0.5 else 0)\n\n # Let's see the head\n # y_train_pred_final.head()\n\n # confusion matrix\n from sklearn import metrics\n # confusion_matrix = metrics.confusion_matrix(y_train_pred_final.Churn, y_train_pred_final.predicted)\n # print(confusion_matrix)\n\n accuracy_value = metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.predicted)\n\n # Making predictions on the test set\n X_test[['tenure', 'MonthlyCharges', 'TotalCharges']] = scaler.fit_transform(\n X_test[['tenure', 'MonthlyCharges', 'TotalCharges']])\n X_test = X_test[feature_cols]\n # X_test.head()\n\n X_test_sm = sm.add_constant(X_test)\n y_test_pred = res.predict(X_test_sm)\n\n # Converting y_pred to a dataframe which is an array\n y_pred_1 = pd.DataFrame(y_test_pred)\n # y_pred_1.head()\n\n # Converting y_test to dataframe\n y_test_df = pd.DataFrame(y_test)\n\n # Putting CustID to index\n y_test_df['CustID'] = y_test_df.index\n\n y_pred_1.reset_index(drop=True, inplace=True)\n y_test_df.reset_index(drop=True, inplace=True)\n\n y_pred_final = pd.concat([y_test_df, y_pred_1], axis=1)\n y_pred_final = y_pred_final.reindex(['CustID', 'Churn', 'Churn_Prob'], axis=1)\n y_pred_final['final_predicted'] = y_pred_final.Churn_Prob.map(lambda x: 1 if x > 0.42 else 0)\n\n baseline_accuracy = metrics.accuracy_score(y_pred_final.Churn, y_pred_final.final_predicted)\n accuracy_improvement = accuracy_value - baseline_accuracy\n values = {\n 'accuracy_value': accuracy_value,\n 'baseline_accuracy': baseline_accuracy,\n 'accuracy_improvement': accuracy_improvement\n }\n\n return render_template(\"assignment9.html.j2\", graphJSON=graphJSON, values=values)\n" }, { "alpha_fraction": 0.591208815574646, "alphanum_fraction": 0.601098895072937, "avg_line_length": 32.66666793823242, "blob_id": "13e7d06796f8157d23256519804ff029b74db165", "content_id": "db5cf96613f33c61d166a0b86e240a94d1b6d546", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 910, "license_type": "no_license", "max_line_length": 108, "num_lines": 27, "path": "/customutils.py", "repo_name": "pinaki-das-sage/assignments", "src_encoding": "UTF-8", "text": "from sklearn import tree\nimport pydotplus\nimport base64\nfrom IPython.display import Image\nimport os\nfrom pathlib import Path\nimport pandas as pd\n\n\nclass CustomUtils:\n @staticmethod\n def get_base64_encoded_image(decision_tree, columns):\n dot_data = tree.export_graphviz(decision_tree, out_file=None, feature_names=columns, impurity=False,\n filled=True,\n proportion=True,\n rounded=True)\n\n graph = pydotplus.graph_from_dot_data(dot_data)\n image = Image(graph.create_png())\n encodedImage = base64.b64encode(image.data).decode(\"utf-8\")\n return encodedImage\n\n @staticmethod\n def read_file_and_return_df(filename):\n filepath = os.path.join(Path(__file__).parent, 'data', '.')\n df = pd.read_csv(f'{filepath}/{filename}')\n return df\n\n" }, { "alpha_fraction": 0.5755761861801147, "alphanum_fraction": 
0.5860520601272583, "avg_line_length": 29.372726440429688, "blob_id": "9db2e68e5c884e8a73aca36b23cb71f34de097c4", "content_id": "0b07dcbb876d555c913b9272f26da09048afb40e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3341, "license_type": "no_license", "max_line_length": 120, "num_lines": 110, "path": "/assignment10.py", "repo_name": "pinaki-das-sage/assignments", "src_encoding": "UTF-8", "text": "from flask import render_template\nfrom flask import request\n\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import metrics\nfrom customutils import CustomUtils\n\n\nclass Assignment10:\n @staticmethod\n def gender_map(x):\n return x.map({'male': 1, \"female\": 0})\n\n @staticmethod\n def process():\n passengers = CustomUtils.read_file_and_return_df('10_titanic.csv');\n feature_cols = ['Pclass', 'Sex', 'Age']\n # passengers.head()\n\n passengers[['Sex']] = passengers[['Sex']].apply(Assignment10.gender_map)\n # passengers.head()\n\n # there are some NaN values in age, we use the mean age there\n mean_age = passengers['Age'].mean()\n passengers['Age'].fillna(value=mean_age, inplace=True)\n\n passengers.head()\n # mean_age\n\n X = passengers[feature_cols]\n y = passengers['Survived']\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=90)\n knn = KNeighborsClassifier(n_neighbors=21)\n\n knn.fit(X_train, y_train)\n # predict\n y_pred = knn.predict(X_test)\n model_accuracy = metrics.accuracy_score(y_test, y_pred)\n # model_accuracy\n\n return render_template(\"assignment10.html.j2\", model_accuracy=model_accuracy)\n\n # @TODO figure out a better way to handle the duplicate code\n @staticmethod\n def predict():\n data = request.form\n age = data.get(\"age\")\n gender = data.get(\"gender\")\n pclass = data.get(\"pclass\")\n\n # put some default values in case user didnt pass anything\n if gender == \"\":\n gender = 1\n\n if pclass == \"\":\n pclass = 2\n\n import os\n from pathlib import Path\n filepath = os.path.join(Path(__file__).parent, 'data', '.')\n\n passengers = pd.read_csv(f'{filepath}/10_titanic.csv')\n feature_cols = ['Pclass', 'Sex', 'Age']\n # passengers.head()\n\n passengers[['Sex']] = passengers[['Sex']].apply(Assignment10.gender_map)\n # passengers.head()\n\n # there are some NaN values in age, we use the mean age there\n mean_age = passengers['Age'].mean()\n passengers['Age'].fillna(value=mean_age, inplace=True)\n\n if age == \"\":\n age = str(round(mean_age, 2))\n\n # passengers.head()\n # mean_age\n\n X = passengers[feature_cols]\n y = passengers['Survived']\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=90)\n knn = KNeighborsClassifier(n_neighbors=21)\n\n knn.fit(X_train, y_train)\n # predict\n y_pred = knn.predict(X_test)\n\n survived = knn.predict([[pclass, gender, age]])[0]\n\n survivedString = \"Died\"\n if survived == 1:\n survivedString = \"Survived\"\n\n genderString = \"female\"\n if gender == \"1\":\n genderString = \"male\"\n\n pclassString = \"Third\"\n if pclass == \"1\":\n pclassString = \"First\"\n elif pclass == \"2\":\n pclassString = \"Second\"\n\n return f'a person with <b>{genderString}</b> gender of <b>{age}</b> age in <b>{pclassString}</b> class would ' \\\n f'have <b>{survivedString}</b> according to knn '\n" }, { "alpha_fraction": 0.5934478044509888, "alphanum_fraction": 0.6054864525794983, "avg_line_length": 
44.24106979370117, "blob_id": "9cfafc3589a18456233a0833a6e13976bf8985a6", "content_id": "0908f1cbc6b68ad79787045302eec1af070af065", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5067, "license_type": "no_license", "max_line_length": 167, "num_lines": 112, "path": "/assignment16.py", "repo_name": "pinaki-das-sage/assignments", "src_encoding": "UTF-8", "text": "from flask import render_template\nimport pandas as pd\nimport numpy as np\nfrom ast import literal_eval\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel\nfrom customutils import CustomUtils\nimport warnings; warnings.simplefilter('ignore')\n\n\nclass Assignment16:\n @staticmethod\n def process():\n md = CustomUtils.read_file_and_return_df('16_movies_metadata.csv')\n # md.head()\n\n # fill the null values with []\n md['genres'] = md['genres'].fillna('[]').apply(literal_eval).apply(\n lambda x: [i['name'] for i in x] if isinstance(x, list) else []\n )\n\n # get the vote counts and averages for all movies\n vote_counts = md[md['vote_count'].notnull()]['vote_count'].astype('int')\n vote_averages = md[md['vote_average'].notnull()]['vote_average'].astype('int')\n vote_mean = vote_averages.mean()\n # vote_mean\n\n top_vote_counts = vote_counts.quantile(0.95)\n # top_vote_counts\n\n # get release year for all movies in a new column\n md['year'] = pd.to_datetime(md['release_date'], errors='coerce').apply(\n lambda x: str(x).split('-')[0] if x != np.nan else np.nan\n )\n\n # get the above average movies list\n qualified = md[(md['vote_count'] >= top_vote_counts) & (md['vote_count'].notnull()) & (md['vote_average'].notnull())][\n ['title', 'year', 'vote_count', 'vote_average', 'popularity', 'genres']]\n qualified['vote_count'] = qualified['vote_count'].astype('int')\n qualified['vote_average'] = qualified['vote_average'].astype('int')\n # qualified.shape\n\n # get the top 250 movies by vote average\n qualified = qualified.sort_values('vote_average', ascending=False).head(250)\n # qualified.head(15)\n\n s = md.apply(lambda x: pd.Series(x['genres']), axis=1).stack().reset_index(level=1, drop=True)\n s.name = 'genre'\n gen_md = md.drop('genres', axis=1).join(s)\n\n best_romantic_movies = Assignment16.build_chart(gen_md, 'Romance').head(15)\n\n links_small = CustomUtils.read_file_and_return_df('16_links_small.csv')\n links_small = links_small[links_small['tmdbId'].notnull()]['tmdbId'].astype('int')\n\n md = md.drop([19730, 29503, 35587])\n md['id'] = md['id'].astype('int')\n smd = md[md['id'].isin(links_small)]\n # smd.shape\n\n smd['tagline'] = smd['tagline'].fillna('')\n smd['description'] = smd['overview'] + smd['tagline']\n smd['description'] = smd['description'].fillna('')\n\n tf = TfidfVectorizer(analyzer='word')\n tfidf_matrix = tf.fit_transform(smd['description'])\n # tfidf_matrix.shape\n\n cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)\n # cosine_sim[0]\n\n smd = smd.reset_index()\n titles = smd['title']\n indices = pd.Series(smd.index, index=smd['title'])\n\n movie_to_search = 'Batman Begins'\n recommendations = Assignment16.get_recommendations(indices, cosine_sim, titles, movie_to_search).head(10)\n\n return render_template(\"assignment16.html.j2\", vote_counts=vote_counts, vote_averages=vote_averages,\n vote_mean=vote_mean, best_romantic_movies=best_romantic_movies.to_html(classes='table table-striped', index=False, justify='center'),\n movie_to_search=movie_to_search, 
recommendations=recommendations.to_html(classes='table table-striped', index=False, justify='center'),\n sample_dataset=md.head(5).to_html(classes='table table-striped', index=False, justify='center')\n )\n\n @staticmethod\n def build_chart(gen_md, genre, percentile=0.85):\n df = gen_md[gen_md['genre'] == genre]\n vote_counts = df[df['vote_count'].notnull()]['vote_count'].astype('int')\n vote_averages = df[df['vote_average'].notnull()]['vote_average'].astype('int')\n C = vote_averages.mean()\n m = vote_counts.quantile(percentile)\n\n qualified = df[(df['vote_count'] >= m) & (df['vote_count'].notnull()) & (df['vote_average'].notnull())][\n ['title', 'year', 'vote_count', 'vote_average', 'popularity']]\n qualified['vote_count'] = qualified['vote_count'].astype('int')\n qualified['vote_average'] = qualified['vote_average'].astype('int')\n\n qualified['wr'] = qualified.apply(\n lambda x: (x['vote_count'] / (x['vote_count'] + m) * x['vote_average']) + (m / (m + x['vote_count']) * C),\n axis=1)\n qualified = qualified.sort_values('wr', ascending=False).head(250)\n\n return qualified\n\n @staticmethod\n def get_recommendations(indices, cosine_sim, titles, title):\n idx = indices[title]\n sim_scores = list(enumerate(cosine_sim[idx]))\n sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)\n sim_scores = sim_scores[1:31]\n movie_indices = [i[0] for i in sim_scores]\n return titles.iloc[movie_indices].to_frame()\n" }, { "alpha_fraction": 0.6230642795562744, "alphanum_fraction": 0.6369028091430664, "avg_line_length": 43.632354736328125, "blob_id": "2c3d9c319dc06210f92730a5309eb52b2a367007", "content_id": "c1d99c0b89dd16b0eea3ecf51a948f96dff5120f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3035, "license_type": "no_license", "max_line_length": 146, "num_lines": 68, "path": "/assignment17.py", "repo_name": "pinaki-das-sage/assignments", "src_encoding": "UTF-8", "text": "from flask import render_template\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport plotly.express as px\nimport plotly\nimport json\n\nfrom customutils import CustomUtils\nimport warnings; warnings.simplefilter('ignore')\n\n\nclass Assignment17:\n @staticmethod\n def process():\n df = CustomUtils.read_file_and_return_df('17_monthly_ridership.csv')\n # df.head()\n\n # rename the column names\n df.columns = [\"month\", \"average_monthly_ridership\"]\n # df.head()\n\n # data cleanup\n df['average_monthly_ridership'].unique()\n df = df.drop(df.index[df['average_monthly_ridership'] == ' n=114'])\n\n # correct the column dtypes\n df['average_monthly_ridership'] = df['average_monthly_ridership'].astype(np.int32)\n df['month'] = pd.to_datetime(df['month'], format='%Y-%m')\n # df.dtypes\n\n average_rider_line_chart = px.line(df, x=\"month\", y=\"average_monthly_ridership\", title='Average monthly bus riders in Oergon', height=600)\n\n # change the month to numeric format so we have monthly data rather than yearly\n to_plot_monthly_variation = df\n mon = df['month']\n temp = pd.DatetimeIndex(mon)\n month = pd.Series(temp.month)\n to_plot_monthly_variation = to_plot_monthly_variation.drop(['month'], axis=1)\n to_plot_monthly_variation = to_plot_monthly_variation.join(month)\n to_plot_monthly_variation.head()\n\n average_rider_bar_chart = px.bar(to_plot_monthly_variation, x='month', y='average_monthly_ridership', height=600)\n\n # observations = ridership declines in july and august\n # Applying Seasonal ARIMA model to forcast the data\n mod = 
sm.tsa.SARIMAX(df['average_monthly_ridership'], trend='n', order=(0, 1, 0), seasonal_order=(1, 1, 1, 12))\n results = mod.fit()\n # print(results.summary())\n\n df['forecast'] = results.predict(start=102, end=120, dynamic=True)\n rider_forecast = px.line(df, x='month', y=['average_monthly_ridership', 'forecast'], height=600)\n\n return render_template(\"assignment17.html.j2\",\n sample_dataset=df.head(5).to_html(classes='table table-striped', index=False, justify='center'),\n average_rider_line_json=json.dumps(average_rider_line_chart, cls=plotly.utils.PlotlyJSONEncoder),\n average_rider_bar_json=json.dumps(average_rider_bar_chart, cls=plotly.utils.PlotlyJSONEncoder),\n rider_forecast_json=json.dumps(rider_forecast, cls=plotly.utils.PlotlyJSONEncoder)\n )\n\n @staticmethod\n def get_recommendations(indices, cosine_sim, titles, title):\n idx = indices[title]\n sim_scores = list(enumerate(cosine_sim[idx]))\n sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)\n sim_scores = sim_scores[1:31]\n movie_indices = [i[0] for i in sim_scores]\n return titles.iloc[movie_indices].to_frame()\n" }, { "alpha_fraction": 0.5697344541549683, "alphanum_fraction": 0.5794019103050232, "avg_line_length": 44.374267578125, "blob_id": "bd7433d70b46e8052cbabe86ad572e1c6934c67b", "content_id": "7a36b42e90cb6294776538d7647fa15210d8cb81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7758, "license_type": "no_license", "max_line_length": 165, "num_lines": 171, "path": "/assignment12.py", "repo_name": "pinaki-das-sage/assignments", "src_encoding": "UTF-8", "text": "from flask import render_template\nimport plotly.express as px\nimport plotly\nimport json\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import tree\nfrom customutils import CustomUtils\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom IPython.display import Image\nfrom six import StringIO\nfrom sklearn.tree import export_graphviz\nimport pydot\nimport pandas as pd\n\n\nclass Assignment12:\n @staticmethod\n def process():\n df = CustomUtils.read_file_and_return_df('11b_employee.csv')\n # df.head()\n\n # pd.set_option(\"display.float_format\", \"{:.2f}\".format)\n # df.describe()\n\n df.drop(['EmployeeCount', 'EmployeeNumber', 'Over18', 'StandardHours'], axis=\"columns\", inplace=True)\n # df.head()\n\n label = LabelEncoder()\n df['Attrition'] = label.fit_transform(df['Attrition'])\n # df.head()\n\n # create a list of categorical columns, any \"object\" (str) columns with less than 10 unique values should be fit\n categorical_cols = []\n unique_vals = []\n for column in df.columns:\n if df[column].dtype == object and len(df[column].unique()) <= 10:\n categorical_cols.append(column)\n unique_vals.append(\", \".join(df[column].unique()))\n\n categories = pd.DataFrame.from_dict({\n 'Category': categorical_cols,\n 'Unique Values': unique_vals\n })\n # categories\n\n # df.hist(edgecolor='black', linewidth=1.2, figsize=(20, 20));\n categorical_cols.append('Attrition')\n df = df[categorical_cols]\n df.head()\n categorical_cols.remove('Attrition')\n\n barChartJsons = []\n # plot how every feature correlates with the \"target\"\n for i, column in enumerate(categorical_cols, 1):\n # print(df[column].value_counts())\n fig = px.bar(df, x=f'{column}', y='Attrition', height=600, 
color=f'{column}')\n        chartJson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n        barChartJsons.append(chartJson)\n        # fig.show()\n\n        conclusions = pd.DataFrame.from_dict({\n            'Category': [\n                'BusinessTravel', 'Department', 'EducationField', 'Gender', 'JobRole', 'MaritalStatus', 'OverTime'\n            ],\n            'Inference': [\n                'The workers who travel rarely are more likely to quit.',\n                'The workers in Research & Development are more likely to quit than the workers in other departments.',\n                'The workers with Life Sciences and Medical degrees are more likely to quit than employees from other fields of education.',\n                'Male employees are more likely to quit.',\n                'The workers in Laboratory Technician, Sales Executive, and Research Scientist are more likely to quit than the workers in other positions.',\n                'Single employees are more likely to quit.',\n                'The workers who work more hours are more likely to quit.'\n            ],\n        })\n\n        # encode all the categorical columns\n        label = LabelEncoder()\n        for column in categorical_cols:\n            df[column] = label.fit_transform(df[column])\n\n        # df.head()\n\n        X = df.drop('Attrition', axis=1)\n        y = df.Attrition\n\n        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\n\n        tree_clf = DecisionTreeClassifier(random_state=42)\n        tree_clf.fit(X_train, y_train)\n\n        random_train_scores = Assignment12.get_score(tree_clf, X_train, y_train, X_test, y_test, train=True)\n        # random_test_scores = Assignment12.get_score(tree_clf, X_train, y_train, X_test, y_test, train=False)\n\n        params = {\n            \"criterion\": (\"gini\", \"entropy\"),\n            \"splitter\": (\"best\", \"random\"),\n            \"max_depth\": (list(range(1, 20))),\n            \"min_samples_split\": [2, 3, 4],\n            \"min_samples_leaf\": list(range(1, 20)),\n        }\n\n        tree_clf = DecisionTreeClassifier(random_state=42)\n        tree_cv = GridSearchCV(tree_clf, params, scoring=\"accuracy\", n_jobs=-1, verbose=1, cv=3)\n        tree_cv.fit(X_train, y_train)\n        best_params = tree_cv.best_params_\n        # print(f\"Best parameters: {best_params}\")\n\n        tree_clf = DecisionTreeClassifier(**best_params)\n        tree_clf.fit(X_train, y_train)\n        # bestparams_train_score = Assignment12.get_score(tree_clf, X_train, y_train, X_test, y_test, train=True)\n        # bestparams_test_score = Assignment12.get_score(tree_clf, X_train, y_train, X_test, y_test, train=False)\n\n        features = list(df.columns)\n        features.remove(\"Attrition\")\n\n        dot_data = StringIO()\n        export_graphviz(tree_clf, out_file=dot_data, feature_names=features, filled=True)\n        graph = pydot.graph_from_dot_data(dot_data.getvalue())\n        Image(graph[0].create_png())\n\n        tree2 = CustomUtils.get_base64_encoded_image(tree_clf, X_train.columns)\n\n        return render_template(\"assignment12.html.j2\", barChartJsons=barChartJsons,\n                               categories=categories.to_html(classes='table table-striped', index=False, justify='center'),\n                               conclusions=conclusions.to_html(classes='table table-striped', index=False, justify='center'),\n\n                               random_train_scores=pd.DataFrame.from_dict(random_train_scores).to_html(classes='table table-striped', index=False, justify='center'),\n                               tree2=tree2\n                               # random_test_scores=random_test_scores,\n\n                               # best_params=pd.DataFrame.from_dict(best_params).to_html(classes='table table-striped', index=False, justify='center'),\n\n                               # bestparams_train_score = bestparams_train_score, bestparams_test_score=bestparams_test_score\n                               )\n\n    @staticmethod\n    def get_score(clf, X_train, y_train, X_test, y_test, train=True):\n        if train:\n            pred = clf.predict(X_train)\n            clf_report = classification_report(y_train, pred, output_dict=True)\n            accuracy = 
f'{accuracy_score(y_train, pred) * 100:.2f}%'\n            confusion = f'{confusion_matrix(y_train, pred)}'\n\n            print(\"Train Result:\\n================================================\")\n            print(f\"Accuracy Score: {accuracy_score(y_train, pred) * 100:.2f}%\")\n            print(\"_______________________________________________\")\n            print(f\"CLASSIFICATION REPORT:\\n{clf_report}\")\n            print(\"_______________________________________________\")\n            print(f\"Confusion Matrix: \\n {confusion_matrix(y_train, pred)}\\n\")\n\n        else:\n            pred = clf.predict(X_test)\n            # keep the same dict form as the train branch\n            clf_report = classification_report(y_test, pred, output_dict=True)\n            accuracy = f'{accuracy_score(y_test, pred) * 100:.2f}%'\n            confusion = f'{confusion_matrix(y_test, pred)}'\n\n            # print(\"Test Result:\\n================================================\")\n            # print(f\"Accuracy Score: {accuracy_score(y_test, pred) * 100:.2f}%\")\n            # print(\"_______________________________________________\")\n            # print(f\"CLASSIFICATION REPORT:\\n{clf_report}\")\n            # print(\"_______________________________________________\")\n            # print(f\"Confusion Matrix: \\n {confusion_matrix(y_test, pred)}\\n\")\n\n        return {\n            'accuracy_score': accuracy,\n            'confusion_matrix': confusion,\n            'classification_report': clf_report\n        }" }, { "alpha_fraction": 0.5910229086875916, "alphanum_fraction": 0.6106722950935364, "avg_line_length": 44.53508758544922, "blob_id": "829046488ecc1a510621fca0a8540a7688cd9c65", "content_id": "103290f4a404f6cdf6dfc2b49c8a32e4fdb8e4e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5191, "license_type": "no_license", "max_line_length": 116, "num_lines": 114, "path": "/assignment11.py", "repo_name": "pinaki-das-sage/assignments", "src_encoding": "UTF-8", "text": "from flask import render_template\nimport plotly.express as px\nimport plotly\nimport pandas as pd\nimport json\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import tree\nfrom customutils import CustomUtils\n\n\nclass Assignment11:\n    @staticmethod\n    def process():\n        bank = CustomUtils.read_file_and_return_df('11a_bank.csv')\n        # bank.head()\n        bank_data = bank.copy()\n\n        # Combine similar jobs into categories\n        bank_data['job'] = bank_data['job'].replace(['admin.'], 'management')\n        bank_data['job'] = bank_data['job'].replace(['housemaid'], 'services')\n        bank_data['job'] = bank_data['job'].replace(['self-employed'], 'entrepreneur')\n        bank_data['job'] = bank_data['job'].replace(['retired', 'student', 'unemployed', 'unknown'], 'others')\n\n        # Combine 'unknown' and 'other' as 'other' doesn't really match either 'success' or 'failure'\n        bank_data['poutcome'] = bank_data['poutcome'].replace(['other'], 'unknown')\n        bank_data.poutcome.value_counts()\n\n        # data cleanup\n        bank_data.drop('contact', axis=1, inplace=True)\n\n        bank_data['default_cat'] = bank_data['default'].map({'yes': 1, 'no': 0})\n        bank_data.drop('default', axis=1, inplace=True)\n\n        bank_data[\"housing_cat\"] = bank_data['housing'].map({'yes': 1, 'no': 0})\n        bank_data.drop('housing', axis=1, inplace=True)\n\n        bank_data[\"loan_cat\"] = bank_data['loan'].map({'yes': 1, 'no': 0})\n        bank_data.drop('loan', axis=1, inplace=True)\n\n        bank_data.drop('month', axis=1, inplace=True)\n        bank_data.drop('day', axis=1, inplace=True)\n\n        bank_data[\"deposit_cat\"] = bank_data['deposit'].map({'yes': 1, 'no': 0})\n        bank_data.drop('deposit', axis=1, inplace=True)\n\n        bank_with_dummies = pd.get_dummies(data=bank_data, columns=['job', 'marital', 'education', 'poutcome'], \\\n                                           
prefix=['job', 'marital', 'education', 'poutcome'])\n        # bank_with_dummies.head()\n        fig = px.bar(bank_data, x='job', y='deposit_cat', height=600, color='job')\n        barchartJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\n        # make a copy\n        bankcl = bank_with_dummies\n        # The Correlation matrix\n        corr = bankcl.corr()\n        # corr\n\n        # Train-Test split: 20% test data\n        data_drop_deposite = bankcl.drop('deposit_cat', axis=1)\n        label = bankcl.deposit_cat\n        data_train, data_test, label_train, label_test = train_test_split(data_drop_deposite, label, test_size=0.2,\n                                                                          random_state=50)\n\n        # Decision tree with depth = 2\n        dt2 = tree.DecisionTreeClassifier(random_state=1, max_depth=2)\n        dt2.fit(data_train, label_train)\n        dt2_score_train = dt2.score(data_train, label_train)\n        dt2_score_test = dt2.score(data_test, label_test)\n\n        # Decision tree with depth = 3\n        dt3 = tree.DecisionTreeClassifier(random_state=1, max_depth=3)\n        dt3.fit(data_train, label_train)\n        dt3_score_train = dt3.score(data_train, label_train)\n        dt3_score_test = dt3.score(data_test, label_test)\n\n        # Decision tree with depth = 4\n        dt4 = tree.DecisionTreeClassifier(random_state=1, max_depth=4)\n        dt4.fit(data_train, label_train)\n        dt4_score_train = dt4.score(data_train, label_train)\n        dt4_score_test = dt4.score(data_test, label_test)\n\n        # Decision tree with depth = 6\n        dt6 = tree.DecisionTreeClassifier(random_state=1, max_depth=6)\n        dt6.fit(data_train, label_train)\n        dt6_score_train = dt6.score(data_train, label_train)\n        dt6_score_test = dt6.score(data_test, label_test)\n\n        # Decision tree: To the full depth\n        dt1 = tree.DecisionTreeClassifier()\n        dt1.fit(data_train, label_train)\n        dt1_score_train = dt1.score(data_train, label_train)\n        # print(\"Training score: \", dt1_score_train)\n        dt1_score_test = dt1.score(data_test, label_test)\n        # print(\"Testing score: \", dt1_score_test)\n\n        # convert all data to pandas df and send to template to print\n        scores = {\n            \"Tree Depth\": [\"2\", \"3\", \"4\", \"6\", \"max\"],\n            \"Training score\": [dt2_score_train, dt3_score_train, dt4_score_train, dt6_score_train, dt1_score_train],\n            \"Testing score\": [dt2_score_test, dt3_score_test, dt4_score_test, dt6_score_test, dt1_score_test]\n        }\n        scoresDf = pd.DataFrame.from_dict(scores)\n        scoresDfHTML = scoresDf.to_html(classes='table table-striped', index=False, justify='center')\n\n        # Extract the deposit_cat column (the dependent variable)\n        # corr_deposite = pd.DataFrame(corr['deposit_cat'].drop('deposit_cat'))\n        # corr_deposite.sort_values(by='deposit_cat', ascending=False)\n\n        tree2 = CustomUtils.get_base64_encoded_image(dt2, data_train.columns)\n        tree3 = CustomUtils.get_base64_encoded_image(dt3, data_train.columns)\n\n        return render_template(\"assignment11.html.j2\", barchartJSON=barchartJSON,\n                               scoresDfHTML=scoresDfHTML,\n                               tree2=tree2, tree3=tree3)\n" }, { "alpha_fraction": 0.630630612373352, "alphanum_fraction": 0.637837827205658, "avg_line_length": 33.6875, "blob_id": "e1620b9a34a5d207d8386fe49c0657fbd5a30d0f", "content_id": "ef59cad4498fb2491b8f68af4c694a90bf3588cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1665, "license_type": "no_license", "max_line_length": 108, "num_lines": 48, "path": "/assignment5.py", "repo_name": "pinaki-das-sage/assignments", "src_encoding": "UTF-8", "text": "import os\nfrom pathlib import Path\n\nimport pandas as pd\nfrom flask import render_template\nimport plotly.express as px\nimport plotly\nimport json\n\n\nclass Assignment5:\n    movies = None\n\n    def 
__init__(self):\n        filename = os.path.join(Path(__file__).parent, 'data', '5_imdb_top_1000.csv')\n        self.movies = pd.read_csv(filename)\n\n    def process(self):\n        # create an earnings column from Gross by stripping all commas\n        self.movies['Earnings'] = self.movies['Gross'].str.replace(',', '')\n        movies = self.movies.astype({'Earnings': float})\n\n        # create a new column for year\n        movies['Year'] = movies['Released_Year']\n\n        # there's a stray PG value in the Year column, filter it out\n        movies['Year'] = movies[movies['Year'] != 'PG']['Year']\n\n        # drop null values from Year column\n        movies['Year'].dropna(inplace=True)\n\n        # group by year but retain it as a column (don't make it an index)\n        groupedMoviesList = movies.groupby('Year', as_index=False)\n\n        # get an average of the ratings per year\n        averageRatingByYear = groupedMoviesList.mean()\n\n        # create a line chart out of it\n        fig = px.line(\n            averageRatingByYear,\n            x=\"Year\",\n            y=\"IMDB_Rating\",\n            title='Average movie rating by year (hover to see average earnings)',\n            hover_data=[\"Earnings\"])\n\n        graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n        datasource = \"https://www.kaggle.com/harshitshankhdhar/imdb-dataset-of-top-1000-movies-and-tv-shows\"\n        return render_template(\"assignment5.html.j2\", graphJSON=graphJSON, datasource=datasource)\n" }, { "alpha_fraction": 0.7819548845291138, "alphanum_fraction": 0.7849624156951904, "avg_line_length": 54.5, "blob_id": "d9f0ee5aa10fd09358c3ef95f0c7bea2887009fa", "content_id": "65278326f697d1de11052d4f9236d4149641f370", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 665, "license_type": "no_license", "max_line_length": 146, "num_lines": 12, "path": "/README.md", "repo_name": "pinaki-das-sage/assignments", "src_encoding": "UTF-8", "text": "# Sage DS Assignments\n\nAll the assignments we completed as part of the Sage DS program. \n\n## Flask App\n\nI have tried Flask to build the app this time. Flask uses a templating engine on top of HTML which provides much more flexibility compared to DCC.\nAlso, deploying as a multi-page app means we can keep adding to the same application and hence work around the Heroku five-app limitation. \n\nI have added 3 different \"types\" of app in the first version. The first one uses inline code to render the page.\nThe second example uses OOP to declare a class and call it. The third one uses a static method. 
I will be using\nthe third method going forward for all assignments" }, { "alpha_fraction": 0.6711968183517456, "alphanum_fraction": 0.7137220501899719, "avg_line_length": 22.275510787963867, "blob_id": "4d752cc849e7076fc56dd01dbdebc1567482ab36", "content_id": "fd5600fae91e0d3589efac15fa159b4e26f77d2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2281, "license_type": "no_license", "max_line_length": 115, "num_lines": 98, "path": "/app.py", "repo_name": "pinaki-das-sage/assignments", "src_encoding": "UTF-8", "text": "import os\n\nfrom flask import Flask, render_template\nimport pandas as pd\n\nfrom assignment5 import Assignment5\nfrom assignment9 import Assignment9\nfrom assignment10 import Assignment10\nfrom assignment11 import Assignment11\nfrom assignment12 import Assignment12\nfrom assignment16 import Assignment16\nfrom assignment17 import Assignment17\nimport plotly.express as px\nimport plotly\nimport json\n\napp = Flask(__name__)\n\n\n# home page\[email protected](\"/\")\ndef home():\n return render_template(\"index.html.j2\")\n\n\n# 404 handler\[email protected](404)\ndef not_found(e):\n return render_template(\"404.html.j2\")\n\n\n# first method - kept it simple here, it is defined right here within the file\[email protected](\"/assignment4\")\ndef assignment4():\n filename = os.path.join(app.root_path, 'data', '4_tax2gdp.csv')\n tax2gdp = pd.read_csv(filename)\n\n # filter some outliers\n tax2gdp2 = tax2gdp[tax2gdp['GDP (In billions)'] < 10000]\n\n fig = px.bar(x=tax2gdp2[\"Tax Percentage\"],\n y=tax2gdp2[\"GDP (In billions)\"]\n )\n fig.update_layout(\n title='Tax rate by GDP for countries. Still WIP. Need to figure out how to add the country name on hover.',\n showlegend=True)\n\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n\n return render_template(\"assignment4.html.j2\", graphJSON=graphJSON)\n\n\n# second method - this is defined in its own file and we just call the method\[email protected](\"/assignment5\")\ndef assignment5():\n obj = Assignment5()\n return obj.process()\n\n\n# ninth assignment - static function used\[email protected](\"/assignment9\")\ndef assignment9():\n return Assignment9.process()\n\n\[email protected](\"/assignment10\")\ndef assignment10():\n return Assignment10.process()\n\n\[email protected](\"/assignment11\")\ndef assignment11():\n return Assignment11.process()\n\n\[email protected](\"/assignment12\")\ndef assignment12():\n return Assignment12.process()\n\n\[email protected](\"/assignment16\")\ndef assignment16():\n return Assignment16.process()\n\n\[email protected](\"/assignment17\")\ndef assignment17():\n return Assignment17.process()\n\n\n# background process happening without any refreshing\[email protected]('/assignment10_predict', methods=['POST'])\ndef assignment10_predict():\n return Assignment10.predict()\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n" } ]
11
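The assignment11.py file in the record above fits the same DecisionTreeClassifier five times at depths 2, 3, 4, 6 and full depth in near-identical blocks. A minimal sketch of the same depth comparison as a single loop — using synthetic make_classification data as a stand-in for the repository's bank CSV, which is an assumption here, not the project's actual input:

```python
from sklearn import tree
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

# Synthetic stand-in for the bank data (assumption: any binary-labelled
# feature matrix with the same 80/20 split illustrates the pattern).
X, y = make_classification(n_samples=1000, n_features=20, random_state=50)
data_train, data_test, label_train, label_test = train_test_split(
    X, y, test_size=0.2, random_state=50)

scores = {"Tree Depth": [], "Training score": [], "Testing score": []}
for depth in (2, 3, 4, 6, None):  # None grows the tree to full depth
    clf = tree.DecisionTreeClassifier(random_state=1, max_depth=depth)
    clf.fit(data_train, label_train)
    scores["Tree Depth"].append("max" if depth is None else str(depth))
    scores["Training score"].append(clf.score(data_train, label_train))
    scores["Testing score"].append(clf.score(data_test, label_test))
print(scores)  # train score rises with depth while test score flattens: overfitting
```

The loop produces the same dict shape that assignment11.py feeds to pd.DataFrame.from_dict before rendering it as an HTML table.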
SamuelAdamsMcGuire/twitter_slackbot
https://github.com/SamuelAdamsMcGuire/twitter_slackbot
d35e3d63c9edf305c229547db57a3112ebab2ea8
2176bbb4b7b05ccda3db372e823b1fe9da4edaef
e2cab50044bfe8c46dc20e4d00d7f837ebda57ef
refs/heads/master
2023-04-28T10:40:26.380424
2021-05-26T15:04:47
2021-05-26T15:04:47
255,368,593
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7624703049659729, "alphanum_fraction": 0.7672209143638611, "avg_line_length": 29.071428298950195, "blob_id": "19937632b01e79af28a42380bdf346193d6e4e59", "content_id": "88452c4212268a5e6bb4acb9a7c300bf74f6b7dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 421, "license_type": "no_license", "max_line_length": 66, "num_lines": 14, "path": "/tweet_collector/Dockerfile", "repo_name": "SamuelAdamsMcGuire/twitter_slackbot", "src_encoding": "UTF-8", "text": "# Use an official Python runtime as a parent image\nFROM python:3.6-slim\n\n# Set the working directory to /code_tweet\nWORKDIR /code_tweet\n\n# Get all needed requirements to run the environment\nCOPY requirements.txt /code_tweet\n#COPY tweets_git.py /code_tweet\n#COPY config.py /code_tweet\nRUN pip install --trusted-host pypi.python.org -r requirements.txt\n\n# Run tweets_git.py when the container launches\nCMD [\"python\", \"tweets_git.py\"]\n" }, { "alpha_fraction": 0.7194244861602783, "alphanum_fraction": 0.7482014298439026, "avg_line_length": 68.5, "blob_id": "fcf187cab24f1c6c1f77999c4b23b7c117763e1e", "content_id": "abf8e1043fa66ab7fdaac31a00cca0d021956767", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 139, "license_type": "no_license", "max_line_length": 70, "num_lines": 2, "path": "/slackbot/config_example.py", "repo_name": "SamuelAdamsMcGuire/twitter_slackbot", "src_encoding": "UTF-8", "text": "URL = 'get link from slack after you make slackbot app insert here'\nconn_string = 'postgresql://<username>:<password>@my_postgresdb:5432/'\n" }, { "alpha_fraction": 0.7006802558898926, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 15.333333015441895, "blob_id": "3aec6d206d5ba585fdfa9615e6b7dd1239f0065c", "content_id": "77b565a6a6b11a3318b0acaf62b898bf852d50de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 147, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, "path": "/etl/Dockerfile", "repo_name": "SamuelAdamsMcGuire/twitter_slackbot", "src_encoding": "UTF-8", "text": "FROM python:3.6-slim\n\nWORKDIR /code_etl\n\nCOPY requirements.txt /code_etl\nRUN pip install -r requirements.txt\n#ADD . 
/etl\n\nCMD [\"python\", \"etl.py\"]\n" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.72620689868927, "avg_line_length": 28, "blob_id": "2fb368bcf7d2ebfeef7a4f0e896cb9fbd22b31be", "content_id": "64038311525206abdd472ec79a4c81e61b4b68e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1450, "license_type": "no_license", "max_line_length": 79, "num_lines": 50, "path": "/etl/etl.py", "repo_name": "SamuelAdamsMcGuire/twitter_slackbot", "src_encoding": "UTF-8", "text": "'''\nExtract tweets from MongoDB, transform them via a sentiment analysis, \nthen load them into the PostgreSQL DB\n'''\nimport logging\nimport pymongo\nimport config\nimport pandas as pd\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nfrom sqlalchemy import create_engine\n\n# set up logging\nlogging.basicConfig(\n    level=logging.DEBUG,\n    format='%(asctime)s - %(name)s - %(levelname)s- %(message)s',\n    datefmt='%Y-%m-%d %H:%M:%S',\n    filename='../logs/log_etl.log', filemode='w'\n)\n\n# connect to mongodb locally or via docker\n# mdb = pymongo.MongoClient(\"mongodb://0.0.0.0:27018/\")\nmdb = pymongo.MongoClient(config.mongo_conn)\nlogging.info(mdb)\n\n# collect results\nresult = mdb.twitter.tweets.find()\n\n# set up sentiment analyzer\ns = SentimentIntensityAnalyzer()\n\n# loop through the cursor object retrieved from Mongodb and extract wanted data\ntweet_list = []\nfor docs in result:\n    sentiment = s.polarity_scores(docs['text'])['compound']\n    row = (docs['text'], sentiment, docs['date'])\n    tweet_list.append(row)\n\nlogging.info(tweet_list)\n# put the list into a pandas dataframe\ndf = pd.DataFrame(tweet_list, columns=['text', 'sentiment', 'timestamp'])\nlogging.info(df)\n\ndb = create_engine(f'{config.post_conn}sentiment_db')\n\n# set up for exceptions for connection\ntry:\n    df.to_sql('sentiment', db, if_exists='append', index=False)\n    logging.info('db updated')\nexcept ConnectionError:\n    logging.exception('Connection not made!')\n" }, { "alpha_fraction": 0.868852436542511, "alphanum_fraction": 0.8852459192276001, "avg_line_length": 9.166666984558105, "blob_id": "18f528a476eb8b9aba798ee9c3a11f9ca096cee9", "content_id": "b48365f002494e435c6bd44f0c8c0cc0c0a40f385", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 61, "license_type": "no_license", "max_line_length": 15, "num_lines": 6, "path": "/slackbot/requirements.txt", "repo_name": "SamuelAdamsMcGuire/twitter_slackbot", "src_encoding": "UTF-8", "text": "slack\nslackclient\npandas\nsqlalchemy\npsycopg2-binary\nrequests\n" }, { "alpha_fraction": 0.7878944873809814, "alphanum_fraction": 0.7891159844398499, "avg_line_length": 70.59259033203125, "blob_id": "479f8b4783156a5769c18d74cf8ca9e3eb4a5455", "content_id": "93fc817cea71ca97a5e5896df08c0a4f8514e405", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1933, "license_type": "no_license", "max_line_length": 561, "num_lines": 27, "path": "/README.md", "repo_name": "SamuelAdamsMcGuire/twitter_slackbot", "src_encoding": "UTF-8", "text": "# Docker project\n\nThe goal of this project was building a data pipeline using DOCKER and getting to understand the ins and outs of DOCKER better. I built 5 containers that work in series toward a goal. The first container uses a Python script to access the Twitter stream via their API. The stream filters tweets in relation to a specific topic and saves them in MongoDB. 
The second container is the connection with MongoDB. <br />\n<br />\nFrom here the data is accessed from a third container and then a sentiment analysis is performed to rate the tweets from -1 to 1, with 1 being very positive and -1 being very negative. The result and the original data are then uploaded to a PostgreSQL database via the fourth docker container. The fifth and final container accesses the analyzed data and runs it through a simple Python script that rates each tweet as positive, negative or neutral. This result is then posted to Slack via a Slackbot to a specified channel, in this case twitternews. \n\n## Requirements\n\nInstall Docker: <br />\n<br />\nDocker is made so that programs written can be reproduced remotely by anyone around the world. Therefore the only requirement to run this script, other than access tokens and codes for the various websites and databases, is DOCKER. Docker will take care of the rest. What you will have to make sure is set up for your system is the docker-compose file. The necessary port mapping, directory paths and passwords will need to be set for your system.\n\n## Usage\n\nClone the repo, personalise the config scripts with the required data, and ensure the Slack channel in the slackbot script is set to the channel you want the results sent to. \n\nThen execute with the following in the CLI: \n\ndocker-compose build <br>\ndocker-compose up\n\n## Contributing\nPull requests are welcome. I am also very open to suggestions on how to improve the code.\n\n## Sources\n[twitter](https://developer.twitter.com/en/apps)\n[Slack](www.slack.com)\n" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.742222249507904, "avg_line_length": 44, "blob_id": "b714854e68235aa815ec13a97797a628f49e048c", "content_id": "ec819e6c1eea5b8053bcb79c4c3ba71af2ea62db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 70, "num_lines": 5, "path": "/tweet_collector/config_example.py", "repo_name": "SamuelAdamsMcGuire/twitter_slackbot", "src_encoding": "UTF-8", "text": "CONSUMER_API_KEY = \"get from twitter\"\nCONSUMER_API_SECRET = \"get from twitter\"\nACCESS_TOKEN = 'get from twitter'\nACCESS_TOKEN_SECRET = 'get from twitter'\nmongo_conn = \"mongodb://<username>:<password>@my_mongodb:27017/tweets\"\n" }, { "alpha_fraction": 0.4681616723537445, "alphanum_fraction": 0.503045380115509, "avg_line_length": 36.23711395263672, "blob_id": "c3326511f37b85eeaa8495bfa2150515cf9eb73b", "content_id": "7ac4d84d1c76af65e6b44925457ab3d32b539b63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3612, "license_type": "no_license", "max_line_length": 191, "num_lines": 97, "path": "/slackbot/slackbot.py", "repo_name": "SamuelAdamsMcGuire/twitter_slackbot", "src_encoding": "UTF-8", "text": "'''\nSend result of sentiment analysis to chosen slack channel\n'''\nimport random\nimport config\nimport logging\nimport requests\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\n# set up logging\nlogging.basicConfig(\n    level=logging.DEBUG,\n    format='%(asctime)s - %(name)s - %(levelname)s- %(message)s',\n    datefmt='%Y-%m-%d %H:%M:%S',\n    filename='../logs/log_bot.log', filemode='w'\n)\n\n# use create_engine for making the connection to the postgres db\nengine = create_engine(f'{config.conn_string}sentiment_db')\n\n# using sqlalchemy to select all rows from the sentiment table of the database\nselection = pd.read_sql('SELECT * FROM sentiment;', 
engine)\n\n# use a random number to post a random tweet from the db\nx = random.randint(0, 20)\n\n# pick out the sentiment of the random tweet\nsenti = float(selection['sentiment'][x])\n\n# post depending on the sentiment: above 0 positive, below 0 negative and at 0 neutral\nif senti > 0:\n    requests.post(url=config.URL, json={\n        \"blocks\": [\n            {\n                \"type\": \"section\",\n                \"text\": {\n                    \"type\": \"mrkdwn\",\n                    \"text\": \"Vader Sentiment analysis of a random tweet:\"\n                }\n            },\n            {\n                \"type\": \"section\",\n                \"text\": {\n                    \"type\": \"mrkdwn\",\n                    \"text\": f\"*Tweet:*\\n{selection['text'][x]}\\n*When:*\\n{selection['timestamp'][x]}\\n*Vader Sentiment Value:* {senti}\\n*Seems to be:* Positive\\n\"\n                },\n                \"accessory\": {\n                    \"type\": \"image\",\n                    \"image_url\": \"https://4.bp.blogspot.com/-x--m1C5hAyg/U51ZRPFFvTI/AAAAAAAABMo/6FF0bH4L2P4/s1600/4.jpg\",\n                    \"alt_text\": \"computer thumbnail\"\n                }\n            }]})\nelif senti < 0:\n    requests.post(url=config.URL, json={\n        \"blocks\": [\n            {\n                \"type\": \"section\",\n                \"text\": {\n                    \"type\": \"mrkdwn\",\n                    \"text\": \"Vader Sentiment analysis of a random tweet:\"\n                }\n            },\n            {\n                \"type\": \"section\",\n                \"text\": {\n                    \"type\": \"mrkdwn\",\n                    \"text\": f\"*Tweet:*\\n{selection['text'][x]}\\n*When:*\\n{selection['timestamp'][x]}\\n*Vader Sentiment Value:* {senti}\\n*Seems to be:* Negative\\n\"\n                },\n                \"accessory\": {\n                    \"type\": \"image\",\n                    \"image_url\": \"https://images.squarespace-cdn.com/content/566a4af357eb8d3974390587/1455451431849-3ZR52MOM2N35K0MU7XLM/image-asset.jpeg?content-type=image%2Fjpeg\",\n                    \"alt_text\": \"computer thumbnail\"\n                }\n            }]})\nelse:\n    requests.post(url=config.URL, json={\n        \"blocks\": [\n            {\n                \"type\": \"section\",\n                \"text\": {\n                    \"type\": \"mrkdwn\",\n                    \"text\": \"Vader Sentiment analysis of a random tweet:\"\n                }\n            },\n            {\n                \"type\": \"section\",\n                \"text\": {\n                    \"type\": \"mrkdwn\",\n                    \"text\": f\"*Tweet:*\\n{selection['text'][x]}\\n*When:*\\n{selection['timestamp'][x]}\\n*Vader Sentiment Value:* {senti}\\n*Seems to be:* Neutral\\n\"\n                },\n                \"accessory\": {\n                    \"type\": \"image\",\n                    \"image_url\": \"https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/95a29505-23c9-4a16-972d-7487dc596931/d37tr27-c6e6605b-27d5-48bb-8f8a-dfbbfe669f13.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwic3ViIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsImF1ZCI6WyJ1cm46c2VydmljZTpmaWxlLmRvd25sb2FkIl0sIm9iaiI6W1t7InBhdGgiOiIvZi85NWEyOTUwNS0yM2M5LTRhMTYtOTcyZC03NDg3ZGM1OTY5MzEvZDM3dHIyNy1jNmU2NjA1Yi0yN2Q1LTQ4YmItOGY4YS1kZmJiZmU2NjlmMTMucG5nIn1dXX0.zZC-R_FnOTvCzNYjPlnSj0bqnw3F7v329BdWe9JVuG8\",\n                    \"alt_text\": \"computer thumbnail\"\n                }\n            }]})\n" }, { "alpha_fraction": 0.6642857193946838, "alphanum_fraction": 0.7285714149475098, "avg_line_length": 69, "blob_id": "888cb9f3ebf9913336c548ee211c6cb894824c58", "content_id": "ef06124995b94f72a836ff9768fe3d861824c74a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 70, "num_lines": 2, "path": "/etl/config_example.py", "repo_name": "SamuelAdamsMcGuire/twitter_slackbot", "src_encoding": "UTF-8", "text": "mongo_conn = \"mongodb://<username>:<password>@my_mongodb:27017/tweets\"\npost_conn = 'postgresql://<username>:<password>@my_postgresdb:5432/'\n" }, { "alpha_fraction": 0.7250000238418579, "alphanum_fraction": 0.8125, "avg_line_length": 12.333333015441895, "blob_id": "9e62b176b48209e3c1b34b7433994a2c8006379b", "content_id": 
"72f0e67335792c291196ddab6f8e577d4fbb7999", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 80, "license_type": "no_license", "max_line_length": 21, "num_lines": 6, "path": "/etl/requirements.txt", "repo_name": "SamuelAdamsMcGuire/twitter_slackbot", "src_encoding": "UTF-8", "text": "vaderSentiment==3.2.1\npandas\nsqlalchemy\npsycopg2-binary\npymongo==3.7.1\nrequests\n" }, { "alpha_fraction": 0.6984127163887024, "alphanum_fraction": 0.7066431641578674, "avg_line_length": 26.88524627685547, "blob_id": "2cc65732ad57056599b43c4909e3ea2f9e4ecfc0", "content_id": "54624dca5e46abfa234d498e52888f2cff26f348", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1701, "license_type": "no_license", "max_line_length": 79, "num_lines": 61, "path": "/tweet_collector/tweets_git.py", "repo_name": "SamuelAdamsMcGuire/twitter_slackbot", "src_encoding": "UTF-8", "text": "'''\nTap into Twitter and stream tweets into the MongoDB database\n'''\nimport config\nimport tweepy\nimport time\nimport pymongo\nimport logging\n\nlogging.basicConfig(\n    level=logging.DEBUG,\n    format='%(asctime)s - %(name)s - %(levelname)s- %(message)s',\n    datefmt='%Y-%m-%d %H:%M:%S',\n    filename='../logs/log_tweets.log', filemode='w'\n)\n\n# authorization keys are called from a config file to get access to the\n# twitter API; see the example config file for more info\nauth = tweepy.OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)\nauth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n# Connect to Mongo locally or via docker\nclient = pymongo.MongoClient(config.mongo_conn)\n# client = pymongo.MongoClient(\"mongodb://0.0.0.0:27017/\")\n\n# here the database is defined\ndb = client.twitter\n\n\n# have tweepy listen in on the tweets and take the wanted attributes from them\nclass PrintStreamListener(tweepy.StreamListener):\n    \"\"\"\n    https://github.com/tweepy/tweepy/blob/v3.8.0/tweepy/streaming.py\n    \"\"\"\n\n    def on_status(self, status):\n        # specify what data should be captured from tweets\n        tweet = {'date': status.created_at, 'text': status.text}\n        # db.twitter.insert_many(tweet)\n        db.tweets.insert_one(tweet)\n        print(f'{status.created_at}: {status.text}')\n\n\n# set up tweet stream\nstream = tweepy.Stream(\n    auth=auth,\n    listener=PrintStreamListener())\n\n# filter the stream to your chosen topic, language, filter level\nstream.filter(\n    track=['election'],\n    languages=['en'],\n    filter_level='low',\n    is_async=True\n)\n\n# run stream for 5 seconds and then disconnect\ntime.sleep(5)\nstream.disconnect()\n\nlogging.info('tweets collected')\n" }, { "alpha_fraction": 0.7197452187538147, "alphanum_fraction": 0.7324841022491455, "avg_line_length": 16.44444465637207, "blob_id": "a3553b76b75205ba147263f13971e1abf35ca367", "content_id": "c7374877fabf6d815d2a3c1c3837e03872a7cf1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 157, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, "path": "/slackbot/Dockerfile", "repo_name": "SamuelAdamsMcGuire/twitter_slackbot", "src_encoding": "UTF-8", "text": "FROM python:3.6-slim\n\nWORKDIR /code_bot\n\nCOPY requirements.txt /code_bot\nRUN pip install -r requirements.txt\n#ADD . /slackbot\n\nCMD [\"python\", \"slackbot.py\"]\n" } ]
12
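slackbot.py in the record above repeats one requests.post block per sentiment class, where only the label and the accessory image differ. A minimal sketch — a hypothetical refactor, not code from the repository — of the shared thresholding and payload logic:

```python
def sentiment_label(compound: float) -> str:
    # VADER compound score: above 0 positive, below 0 negative, exactly 0 neutral
    if compound > 0:
        return "Positive"
    if compound < 0:
        return "Negative"
    return "Neutral"


def build_slack_payload(tweet: str, when: str, compound: float) -> dict:
    # Same mrkdwn section layout the three blocks in slackbot.py build by hand,
    # minus the per-class image accessory.
    text = (f"*Tweet:*\n{tweet}\n*When:*\n{when}\n"
            f"*Vader Sentiment Value:* {compound}\n"
            f"*Seems to be:* {sentiment_label(compound)}\n")
    return {"blocks": [{"type": "section",
                        "text": {"type": "mrkdwn", "text": text}}]}


assert sentiment_label(0.42) == "Positive"
assert sentiment_label(-0.42) == "Negative"
assert sentiment_label(0.0) == "Neutral"
```

A single requests.post(url=config.URL, json=build_slack_payload(...)) call could then replace the three branches.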
CxzPink/GCN
https://github.com/CxzPink/GCN
3fb9415485e6656a0aa0c05fa4a63cb1ab4618ae
49cd443fa13c7306062f85588179f799b3f67434
6a62ad71352d1b22c52fcea81f16d456b1f9fb7a
refs/heads/master
2023-02-04T17:47:03.911697
2020-12-30T06:08:50
2020-12-30T06:08:50
323,034,736
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6204655170440674, "alphanum_fraction": 0.6269658207893372, "avg_line_length": 34.33333206176758, "blob_id": "1d30da6b0efdc3dace0e3637e82988db52c93b89", "content_id": "bc46487f3fb0e6c78d0cea7de509fd99548d1022", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4769, "license_type": "no_license", "max_line_length": 128, "num_lines": 135, "path": "/utils/process.py", "repo_name": "CxzPink/GCN", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pickle as pkl\nimport networkx as nx\nimport scipy.sparse as sp\nfrom scipy.sparse.linalg.eigen.arpack import eigsh\nimport sys\nimport torch\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nimport scipy.io as sio\n\ndef parse_index_file(filename):\n \"\"\"Parse index file.\"\"\"\n index = []\n for line in open(filename):\n index.append(int(line.strip()))\n return index\n\ndef load_data(dataset_str): \n \"\"\"Load data.\"\"\"\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = []\n for i in range(len(names)):\n with open(\"data/ind.{}.{}\".format(dataset_str, names[i]), 'rb') as f:\n if sys.version_info > (3, 0):\n objects.append(pkl.load(f, encoding='latin1'))\n else:\n objects.append(pkl.load(f))\n\n x, y, tx, ty, allx, ally, graph = tuple(objects)\n test_idx_reorder = parse_index_file(\"data/ind.{}.test.index\".format(dataset_str))\n test_idx_range = np.sort(test_idx_reorder)\n\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(len(y))\n idx_val = range(len(y), len(y)+500)\n \n features, _ = preprocess_features(features)\n adj = normalize_adj(adj + sp.eye(adj.shape[0]))\n\n features = torch.FloatTensor(np.array(features))\n labels = torch.LongTensor(np.where(labels)[1])\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n\n return adj, features, labels, idx_train, idx_val, idx_test\n\ndef preprocess_features(features):\n \"\"\"Row-normalize feature matrix and convert to tuple representation\"\"\"\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense(), sparse_to_tuple(features)\n\ndef normalize_adj(adj):\n \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n\ndef sparse_to_tuple(sparse_mx, insert_batch=False):\n \"\"\"Convert sparse matrix to tuple representation.\"\"\"\n \"\"\"Set insert_batch=True if you want to insert a batch dimension.\"\"\"\n def to_tuple(mx):\n if not sp.isspmatrix_coo(mx):\n mx = mx.tocoo()\n if insert_batch:\n coords = np.vstack((np.zeros(mx.row.shape[0]), mx.row, mx.col)).transpose()\n values = mx.data\n shape = (1,) + mx.shape\n else:\n coords = np.vstack((mx.row, mx.col)).transpose()\n values = mx.data\n shape = mx.shape\n return coords, values, shape\n\n if isinstance(sparse_mx, list):\n for i in range(len(sparse_mx)):\n sparse_mx[i] = to_tuple(sparse_mx[i])\n else:\n sparse_mx = to_tuple(sparse_mx)\n\n return sparse_mx\n\ndef 
sparse_mx_to_torch_sparse_tensor(sparse_mx):\n \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)\n\ndef accuracy(output, labels):\n preds = output.max(1)[1].type_as(labels)\n correct = preds.eq(labels).double()\n correct = correct.sum()\n return correct / len(labels)\n\ndef plot_eigenvector(adj, size):\n adj_ = normalize_adj(adj + sp.eye(adj.shape[0]))\n a,b = sp.linalg.eigs(adj_, k=size, which='LR')\n plt.imshow(abs(b))\n plt.colorbar()\n plt.show()\n plt.savefig(\"Eigenvector.png\")\n\ndef save_as_mat(adj,size):\n adj_ = normalize_adj(adj + sp.eye(adj.shape[0]))\n a,b = sp.linalg.eigs(adj_, k=size, which='LR')\n sio.savemat('Saved_data.mat', {'adjacent_matrix': adj, 'normalized_adjacent_matrix': adj_,'eigenvalue': a,'eigenvector': b})\n\ndef plot_graph(adj):\n adj = sp.coo_matrix(adj)\n temp = np.vstack((adj.row , adj.col , adj.data)).transpose()\n edges = temp.tolist()\n G = nx.Graph()\n H = nx.path_graph(adj.shape[0]) \n G.add_nodes_from(H)\n G.add_weighted_edges_from(edges)\n colors = np.arange(adj.shape[0])\n nx.draw(G,pos = nx.spring_layout(G),node_color = 'b',edge_color = 'r',with_labels = False,node_size =0.1,width =0.1)\n plt.show()\n plt.savefig(\"Graph_connection.png\")" }, { "alpha_fraction": 0.5481348037719727, "alphanum_fraction": 0.5559566617012024, "avg_line_length": 30.37735939025879, "blob_id": "da540b68942342636007c55e3f81bd46da8abe39", "content_id": "380eccc16ac04605287c3d57767848dd69917a52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1662, "license_type": "no_license", "max_line_length": 92, "num_lines": 53, "path": "/Layers/layer.py", "repo_name": "CxzPink/GCN", "src_encoding": "UTF-8", "text": "import math\nimport torch\nimport numpy as np\n\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules.module import Module\n\n\nclass GraphConvolution(Module):\n def __init__(self, in_features, out_features, bias=True):\n super(GraphConvolution, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.k_cheby = 1\n self.weight = Parameter(torch.FloatTensor(in_features * self.k_cheby, out_features))\n if bias:\n self.bias = Parameter(torch.FloatTensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.weight.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n\n def forward(self, input, adj):\n Xt = chebyshev(adj, input, self.k_cheby)\n support = torch.mm(Xt, self.weight)\n #output = support\n output = torch.spmm(adj, support)\n if self.bias is not None:\n return output + self.bias\n else:\n return output\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.in_features) + ' -> ' \\\n + str(self.out_features) + ')'\n\ndef chebyshev(L, X, K):\n M, N = X.shape\n Xt = torch.empty((K, M, N))\n Xt[0, ...] = X\n if K > 1:\n Xt[1, ...] = torch.mm(L, X)\n for k in range(2, K):\n Xt[k, ...] = 2 * torch.mm(L, Xt[k-1, ...]) - Xt[k-2, ...] \n Xt = Xt.permute(1,2,0)\n Xt = torch.reshape(Xt, [M, N*K])\n return Xt" } ]
2
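normalize_adj in utils/process.py above computes the symmetric normalization D^{-1/2} (A + I) D^{-1/2} that load_data applies to the graph before training. A minimal dense NumPy sketch of the same computation on a 3-node path graph (the sparse version in the file is equivalent for a symmetric adjacency matrix):

```python
import numpy as np

# 3-node path graph: 0 - 1 - 2
A = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])
A_hat = A + np.eye(3)                          # add self-loops, as in load_data
d_inv_sqrt = 1.0 / np.sqrt(A_hat.sum(axis=1))  # D^(-1/2) as a vector
A_norm = A_hat * d_inv_sqrt[:, None] * d_inv_sqrt[None, :]

assert np.allclose(A_norm, A_norm.T)  # normalization preserves symmetry
print(A_norm)  # eigenvalues are bounded by 1 in absolute value, which keeps
               # repeated multiplication in the GCN layers numerically stable
```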
DicksonChi/format_phone_number
https://github.com/DicksonChi/format_phone_number
58af70ebdbf44065275a146b3b5ec9e6c039d887
2cfb12cf7519a4fd0e89f92b6ebbb9134fc47f1d
9cc71c9f5dc42121e7090cb4cc8dee39a95ffb13
refs/heads/master
2020-12-28T10:33:58.373851
2020-02-04T20:41:00
2020-02-04T20:41:00
238,291,844
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5258503556251526, "alphanum_fraction": 0.5387755036354065, "avg_line_length": 25.25, "blob_id": "e2d20d643c52ccbd399d51729187439ef4962b90", "content_id": "f5ba92c67b3244e9aa9a1446fa423935f7259ea8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1470, "license_type": "no_license", "max_line_length": 84, "num_lines": 56, "path": "/format_number.py", "repo_name": "DicksonChi/format_phone_number", "src_encoding": "UTF-8", "text": "import csv\nimport re\n\n\ndef st_str(s):\n    \"\"\"Strip all hyphens and spaces from the string.\"\"\"\n    s = re.sub('[- ]', '', s)\n    return s\n\n\ndef get_amount_hyphen(s):\n    \"\"\"Get the number of hyphens needed.\"\"\"\n    if len(s) % 3 > 0:\n        return (len(s) - (len(s) % 3))/3\n    else:\n        return (len(s)/3) - 1\n\n\ndef add_hyphen_to_string(s):\n    \"\"\"Add hyphens to the stripped string to form a formatted phone number.\"\"\"\n    s = st_str(s)\n    amt_hyphen = get_amount_hyphen(s)\n\n    j_start = 0\n    j_end = 3\n    new_number = ''\n    for i in range(0, int(amt_hyphen)):\n        if i+1 == amt_hyphen and len(s[j_end:]) == 1:\n            new_number = '{}{}-'.format(new_number, s[j_start:j_end-1])\n            j_start += 2\n        else:\n            new_number = '{}{}-'.format(new_number, s[j_start:j_end])\n            j_start += 3\n            j_end += 3\n    return '{}{}'.format(new_number, s[j_start:])\n\n\ndef format_rows(path_to_csv):\n    \"\"\"Format the phone numbers of all rows in the CSV and print the result.\"\"\"\n    # run through the rows\n    result = []\n    line = -1\n    with open(path_to_csv) as csv_file:\n        csv_reader = csv.reader(csv_file, delimiter=',')\n\n        for row in csv_reader:\n            if line > -1:\n                result.append(add_hyphen_to_string(row[0]))\n            line += 1\n    print('{} phone numbers have been formatted.'.format(line))\n    print(result)\n\n\nif __name__ == '__main__':\n    # format numbers\n    format_rows('phone_numbers.csv')\n" }, { "alpha_fraction": 0.633273720741272, "alphanum_fraction": 0.7191413044929504, "avg_line_length": 30.05555534362793, "blob_id": "24f3ec9062a902291196...", "content_id": "2bbe9062da4f9fbfd465d17138dbd33a43fc91f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 559, "license_type": "no_license", "max_line_length": 120, "num_lines": 18, "path": "/README.md", "repo_name": "DicksonChi/format_phone_number", "src_encoding": "UTF-8", "text": "# format_phone_number\n\n## Installation\n```\n git clone [email protected]:DicksonChi/format_phone_number.git\n cd format_phone_number\n python format_number.py\n```\n\nYou can add your numbers to the `phone_numbers.csv` file.\n\n#### How it works.\nThis goes through a CSV file with phone numbers that are in the wrong format, e.g. \"0 - 22 1985--324\", and is expected to \nformat them into a string of groups of 3 separated by hyphens, with the condition that the last group must not have length 1.\n\ne.g. \"0 - 22 1985--324\" will give \"022-198-53-24\" \n\n\"333 -3 -3-333\" will give \"333-333-33\"\n" } ]
2
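A quick usage sketch for the record above (not part of the repository), exercising add_hyphen_to_string from format_number.py against the two examples in its README:

```python
# Assumes format_number.py from the repository above is importable.
from format_number import add_hyphen_to_string

assert add_hyphen_to_string("0 - 22 1985--324") == "022-198-53-24"
assert add_hyphen_to_string("333 -3 -3-333") == "333-333-33"
```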
TristanThrush/sal
https://github.com/TristanThrush/sal
2e2aa80f99e2fbbfb7d9ab48155023add1abd12f
c28ab47eba656229bd0f0542166b1f6aef29b805
1910915f0dda4c9892764ab3dd82c94e51de5409
refs/heads/master
2020-07-06T06:00:27.066390
2019-08-18T20:40:39
2019-08-18T20:40:39
202,914,948
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5677320957183838, "alphanum_fraction": 0.5696346759796143, "avg_line_length": 35, "blob_id": "559f88d5b0307ea28cb07102af91f70044adb748", "content_id": "605d90c295281376305e3eae75ef9e472404dce3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2628, "license_type": "no_license", "max_line_length": 78, "num_lines": 73, "path": "/sal/generator.py", "repo_name": "TristanThrush/sal", "src_encoding": "UTF-8", "text": "import dill\nimport random\n\n\nclass OperatorGenerator:\n ARGUMENT_UPPER_LIMIT = 8\n off_limit_productions = set()\n temporary_off_limit_productions = set()\n\n @staticmethod\n def load_off_limit_productions(file):\n operator_traces = dill.load(open(file, 'rb'))\n for operator_trace in operator_traces:\n OperatorGenerator.off_limit_productions |= set(\n operator_trace[1])\n\n @staticmethod\n def generate(atoms, terminals):\n OperatorGenerator.current_arguments = 0\n operator = OperatorGenerator.generate_recursive(\n atoms, terminals)\n while operator is None or operator.innerese_and_english().innerese in\\\n OperatorGenerator.off_limit_productions.union(\n OperatorGenerator.temporary_off_limit_productions):\n OperatorGenerator.current_arguments = 0\n operator = OperatorGenerator.generate_recursive(\n atoms, terminals)\n return operator\n\n @staticmethod\n def generate_recursive(atoms, terminals):\n symbol = random.choice(atoms)\n arguments = []\n new_name = symbol.name + ' ('\n for argument in symbol.arguments:\n OperatorGenerator.current_arguments += 1\n if OperatorGenerator.current_arguments >\\\n OperatorGenerator.ARGUMENT_UPPER_LIMIT:\n return None\n if argument == 'p':\n next_symbol = OperatorGenerator.generate_recursive(\n atoms, terminals)\n if next_symbol is None:\n return None\n if argument == 't':\n next_symbol = random.choice(terminals)\n if argument == 'tl':\n next_symbol = random.choice(list(filter(\n lambda terminal: '(' not in terminal, terminals)))\n arguments.append(next_symbol)\n if next_symbol not in terminals:\n new_name += ' ' + next_symbol.name\n else:\n new_name += ' ' + next_symbol\n new_name += ' )'\n\n if new_name == symbol.name + ' ( )':\n new_name = symbol.name\n\n class Operator():\n def __init__(self, name):\n self.name = name\n\n def function(self):\n # Note that this is a different method than an atomic\n # operator's function() method.\n return symbol.function(arguments)\n\n def innerese_and_english(self):\n return symbol.innerese_and_english(arguments)\n\n new_operator = Operator(new_name)\n return new_operator\n" }, { "alpha_fraction": 0.7571801543235779, "alphanum_fraction": 0.7691159844398499, "avg_line_length": 31.301204681396484, "blob_id": "50af2ba85535c880e602b357151acfac7a188e7d", "content_id": "66450eb1092ec9fefd2875d3bd0c344e605250d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2681, "license_type": "no_license", "max_line_length": 368, "num_lines": 83, "path": "/README.md", "repo_name": "TristanThrush/sal", "src_encoding": "UTF-8", "text": "The following instructions were tested with OSX and Ubuntu with and without CUDA.\n\nNote that I use Git LFS, so you should clone this repository with ```git lfs clone```.\n\nFirst, add all of the directories (sal/sal, sal/trained_models, sal/domains) to your Python path. You may also need to install the relevant Python package dependencies for this project. The following commands, entered in a Python 3 terminal, show how to use SAL. 
Note that everywhere you see ```<desired model directory>```, an absolute path must be typed.\n\n\nTo initialize SAL with self referential operators:\n\n```python\nimport sal\nimport manipulation\ns = sal.SAL()\ns.atoms = manipulation.ManipulationAtoms()\ns.problems = manipulation.ManipulationProblems()\n```\n\n\nTo initialize SAL without the self-referential operators:\n\n```python\nimport sal\nimport manipulation\ns = sal.SAL()\ns.atoms = manipulation.ManipulationAtoms()\ns.problems = manipulation.ManipulationProblems()\ndel s.atoms.atoms['then']\ndel s.atoms.atoms['remember']\ndel s.atoms.atoms['forget']\ndel s.atoms.atoms['internalize']\ndel s.atoms.atoms['externalize']\n```\n\n\nNote that after SAL's problems are initialized, a simulator visual file will be printed, (such as '.49456.txt'). You will get a FileNotFoundError if you try to access this visualization too soon. But after SAL starts solving problems, you can open up another terminal and enter the following command to see a real time visualization of the problem that SAL is solving:\n\n```python\nimport manipulation\nmanipulation.SimulatorVisualizer.visualize('.<number given>.txt')\n```\n\n\nTrained SAL instances are provided in sal/trained_models/manipulation_self_referential and sal/trained_models/manipulation_not_self_referential.\nTo load a saved SAL instance:\n\n```python\ns.load(<desired model directory>)\n```\n\n\nTo evaluate SAL's performance (first making |G(.,.)| and |G(.,.)_e| something reasonable):\n\n```python\ns.learner.G = 1000 # Change to 150 if evaluating SAL without self-referential operators\ns.learner.GE = 1000 # Change to 150 if evaluating SAL without self-referential operators\ns.perform(1)\n```\n\n\nTo train SAL yourself for 40 epochs, followed by saving your model:\n\n```python\ns.learner.G = 500 # Change to 150 if training SAL without self-referential operators\ns.learner.GE = 2\ns.learn(40)\ns.save(<desired model directory>)\n```\n\n\nTo run the generalization tests from my thesis, when SAL has the self-referential operators:\n\n```python\nimport analysis\nanalysis.test_generalizability(s)\n```\n\n\nTo print a latex-friendly readout of the entire training history of a saved SAL instance:\n\n```python\nimport analysis\nanalysis.latexable_innerese_operator_traces(<desired model directory>)\n```\n" }, { "alpha_fraction": 0.508974015712738, "alphanum_fraction": 0.5105813145637512, "avg_line_length": 26.651851654052734, "blob_id": "20b4f8d45de5ee70db1ed096001f4bcffff9ff34", "content_id": "5001e7ca3f69bb4f2efa5bab4d2b2b29dc93b463", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3733, "license_type": "no_license", "max_line_length": 79, "num_lines": 135, "path": "/sal/sal.py", "repo_name": "TristanThrush/sal", "src_encoding": "UTF-8", "text": "import dill\nimport os\nfrom learner import Learner\n\n\nclass InnereseAndEnglish:\n def __init__(self, innerese, english):\n self.innerese = innerese\n self.english = english\n\n def __hash__(self):\n return hash(self.innerese)\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.innerese == other.innerese\n else:\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __str__(self):\n return '[ ' + self.innerese + ', ' + self.english + ' ]'\n\n\nclass Atom:\n def __init__(self, name, arguments=[]):\n self.name = name\n self.arguments = arguments\n self.sal = None\n\n def function(self, args):\n pass\n\n def innerese_and_english(self, args):\n 
pass\n\n\nclass Problems:\n    def __init__(self):\n        pass\n\n    def names(self):\n        return self.problems.keys()\n\n    def choice(self, name):\n        pass\n\n\nclass Atoms:\n    def __init__(self):\n        pass\n\n    def names(self):\n        return self.atoms.keys()\n\n    def choice(self, name):\n        return self.atoms[name]\n\n    def connect(self, sal):\n        for atom in self.atoms.values():\n            atom.sal = sal\n\n\nclass SAL():\n    def __init__(self):\n        self.learner = Learner(self)\n        self.atoms = None\n        self.problems = None\n\n    def learn(self, epochs):\n        self.perform(epochs, optimize=True, help=True)\n\n    def perform(self, epochs, optimize=False, help=False, cutoff=float('inf')):\n        self.atoms.connect(self)\n        self.optimize = optimize\n        self.help = help\n        self.operator_traces = []\n        if self.optimize:\n            self.learner.merge_module.train()\n        else:\n            self.learner.merge_module.eval()\n        for epoch in range(epochs):\n            for problem_name in self.problems.names():\n                steps = 0\n                self.operator_traces.append((problem_name, []))\n                self.problem_state_and_goal, reward = self.problems.choice(\n                    problem_name)\n                while reward < 0 and steps < cutoff:\n                    reward = self.step()\n                    steps += 1\n                if reward > 0:\n                    print('\\nSolved: ', problem_name)\n                else:\n                    print('\\nCut off on: ', problem_name)\n            print('Epoch:', epoch)\n            print()\n\n    def step(self, forced_operator=None):\n        innerese_problem_state_and_goal = list(map(\n            lambda innerese_and_english: innerese_and_english.innerese,\n            self.problem_state_and_goal))\n        self.problem_state_and_goal, reward, operator =\\\n            self.learner.operate(\n                innerese_problem_state_and_goal,\n                forced_operator=forced_operator)\n        print(\n            operator.innerese_and_english().innerese + ',',\n            end=' ',\n            flush=True)\n        if self.optimize:\n            self.learner.optimize()\n        return reward\n\n    def save(self, directory):\n        os.makedirs(directory)\n        dill.dump(\n            list(\n                map(\n                    lambda operator_trace: [\n                        operator_trace[0],\n                        list(\n                            map(\n                                lambda operator:\n                                operator.innerese_and_english().innerese,\n                                operator_trace[1]))],\n                    self.operator_traces)),\n            open(\n                directory + '/innerese_operator_traces.b',\n                'wb'))\n        self.learner.save(directory)\n\n    def load(self, directory):\n        self.learner.load(directory)\n" }, { "alpha_fraction": 0.574113130569458, "alphanum_fraction": 0.5808245539665222, "avg_line_length": 39.42635726928711, "blob_id": "bcb69db777917aefcf74a0a656229bd0f0542166", "content_id": "19843cb15bb1244a883c0cdc76186f37a8e78a1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5215, "license_type": "no_license", "max_line_length": 79, "num_lines": 129, "path": "/sal/self_referential_atoms.py", "repo_name": "TristanThrush/sal", "src_encoding": "UTF-8", "text": "from sal import InnereseAndEnglish, Atom\nfrom generator import OperatorGenerator\nimport random\n\n\nclass Then(Atom):\n    def __init__(self, name):\n        super(Then, self).__init__(name, arguments=['p', 'p'])\n\n    def function(self, args):\n        self.sal.step(args[0])\n        reward = self.sal.step(args[1])\n        return self.sal.problem_state_and_goal, reward\n\n    def innerese_and_english(self, args):\n        return InnereseAndEnglish(\n            '( then ' + args[0].innerese_and_english().innerese + ' '\n            + args[1].innerese_and_english().innerese + ' )',\n            args[0].innerese_and_english().english + ' then '\n            + args[1].innerese_and_english().english)\n\n\nclass Forget(Atom):\n    def __init__(self, name):\n        super(Forget, self).__init__(name, arguments=['p'])\n\n    def function(self, args):\n        new_problem_state_and_goal = []\n        for innerese_and_english in self.sal.problem_state_and_goal:\n            if 
innerese_and_english.innerese != '( remember i ' + \\\n args[0].innerese_and_english().innerese + ' )':\n new_problem_state_and_goal.append(\n innerese_and_english)\n return new_problem_state_and_goal, -0.1\n\n def innerese_and_english(self, args):\n return InnereseAndEnglish(\n '( forget ' + args[0].innerese_and_english().innerese + ' )',\n 'forget ' + args[0].innerese_and_english().english)\n\n\nclass Remember(Atom):\n def __init__(self, name):\n super(Remember, self).__init__(name, arguments=['p'])\n\n def function(self, args):\n innerese_operator_traces =\\\n list(map(lambda operator: operator.innerese_and_english().innerese,\n self.sal.operator_traces[-1][1]))\n for innerese_operator in set(innerese_operator_traces):\n if args[0].innerese_and_english().innerese == innerese_operator:\n self.sal.problem_state_and_goal =\\\n self.sal.atoms.choice('forget').function(args)[0]\n self.sal.problem_state_and_goal.append(InnereseAndEnglish(\n '( remember i ' + args[0].innerese_and_english().innerese\n + ' )',\n 'i remember ' + args[0].innerese_and_english().english))\n return self.sal.problem_state_and_goal, -0.1\n\n def innerese_and_english(self, args):\n return InnereseAndEnglish(\n '( remember ' + args[0].innerese_and_english().innerese + ' )',\n 'remember ' + args[0].innerese_and_english().english)\n\n\nclass Externalize(Atom):\n def __init__(self, name):\n super(Externalize, self).__init__(name)\n\n def function(self, args):\n print()\n for item in self.sal.problem_state_and_goal:\n print(item.english + '.', end=\" \", flush=True)\n print()\n return self.sal.problem_state_and_goal, -0.1\n\n def innerese_and_english(self, args):\n return InnereseAndEnglish('externalize', 'externalize')\n\n\nclass Internalize(Atom):\n def __init__(self, name):\n super(Internalize, self).__init__(name)\n self.automatic_help_list = ['place oil jug on stool', 'pick oil jug']\n\n def function(self, args):\n if self.sal.help:\n help_english = None\n if self.sal.operator_traces[-1][0] == 'train':\n for operator in self.sal.operator_traces[-1][1]:\n if operator.innerese_and_english().innerese ==\\\n 'externalize':\n help_english = random.choice(self.automatic_help_list)\n else:\n help_english = None\n # Try to generate an operator that corresponds to the english input.\n help_operator = None\n innerese_problem_state_and_goal = list(map(\n lambda innerese_and_english: innerese_and_english.innerese,\n self.sal.problem_state_and_goal))\n\n self.sal.learner.merge_module(innerese_problem_state_and_goal)\n terminals = list(self.sal.learner.merge_module.constituent_set)\n\n if help_english is not None:\n iter = 0\n beam = 5000\n OperatorGenerator.temporary_off_limit_productions = set()\n while iter < beam and help_operator is None:\n iter += 1\n operator = OperatorGenerator.generate(\n list(map(lambda name: self.sal.atoms.choice(name),\n self.sal.atoms.names())),\n terminals)\n OperatorGenerator.temporary_off_limit_productions.add(\n operator.innerese_and_english().innerese)\n if operator.innerese_and_english().english == help_english:\n help_operator = operator\n OperatorGenerator.temporary_off_limit_productions = set()\n reward = -0.1\n if help_operator is not None:\n print('help: ', help_operator.innerese_and_english().innerese)\n reward = self.sal.step(help_operator)\n else:\n print('FAILED TO INTERNALIZE')\n return self.sal.problem_state_and_goal, reward\n\n def innerese_and_english(self, args):\n return InnereseAndEnglish('internalize', 'internalize')\n" }, { "alpha_fraction": 0.5180969834327698, 
"alphanum_fraction": 0.5246590971946716, "avg_line_length": 39.807533264160156, "blob_id": "d37e1c755054116156f479474d396fa371d01f75", "content_id": "9b1428f26ffe9b4e143d95cd718cf6a40372b46f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9753, "license_type": "no_license", "max_line_length": 79, "num_lines": 239, "path": "/domains/manipulation.py", "repo_name": "TristanThrush/sal", "src_encoding": "UTF-8", "text": "from sal import Atom, Atoms, Problems, InnereseAndEnglish\nfrom self_referential_atoms import Forget, Remember, Externalize, Internalize,\\\n Then\nimport copy\nimport time\nimport os\nfrom random import randint\nimport atexit\n\n\nclass Simulator:\n def __init__(self, state_dict, goal, simulator_visual_file):\n self.state_dict = state_dict\n self.goal = goal\n self.types = ['movables', 'immovables']\n self.left_gripper = InnereseAndEnglish('( left gripper )',\n 'left gripper')\n self.right_gripper = InnereseAndEnglish('( right gripper )',\n 'right gripper')\n self.simulator_visual_file = simulator_visual_file\n self.save_visual()\n\n def innerese_and_english(self):\n innerese_and_english = []\n for item in self.state_dict:\n for resting in self.state_dict[item]:\n if item not in self.types:\n innerese_and_english.append(InnereseAndEnglish(\n '( on ' + resting.innerese + ' ' + item.innerese\n + ' )',\n resting.english + ' is on ' + item.english))\n if len(self.state_dict[item]) == 0 and item not in\\\n self.state_dict['movables'] and item != self.left_gripper\\\n and item != self.right_gripper and item not in\\\n self.types:\n innerese_and_english.append(InnereseAndEnglish(\n '( on nothing ' + item.innerese + ' )', 'nothing is on '\n + item.innerese))\n innerese_and_english.append(self.goal[1])\n return innerese_and_english\n\n def save_visual(self):\n def string_form(item):\n return item.english[:5] + ' '*(5-len(item.english[:5]))\n vis = ''\n for immovable in self.state_dict['immovables']:\n reverse_sub_vis =\\\n ['|' + '-'*6*len(self.state_dict['movables']) + '|']\\\n + ['']*len(self.state_dict['movables'])\n items_to_draw = self.state_dict[immovable]\n for item in items_to_draw:\n spaces = False\n for iter in range(len(self.state_dict['movables'])):\n if not spaces:\n reverse_sub_vis[iter+1] += string_form(item) + ' '\n if len(self.state_dict[item]) != 0:\n # Movable objects can only fit one thing on top.\n item = self.state_dict[item][0]\n else:\n spaces = True\n else:\n reverse_sub_vis[iter+1] += ' '*6\n vis += '\\n'.join(reversed(reverse_sub_vis)) + '\\n'\n if len(self.state_dict[self.left_gripper]) != 0:\n vis += string_form(self.state_dict[self.left_gripper][0]) + ' '\n else:\n vis += ' '*6\n if len(self.state_dict[self.right_gripper]) != 0:\n vis += string_form(self.state_dict[self.right_gripper][0]) + ' '\n else:\n vis += ' '*6\n vis += '\\n[_] [_]'\n visual_save_location = os.path.dirname(os.path.abspath(__file__)) +\\\n '/../' + self.simulator_visual_file\n open(visual_save_location, 'w+').write(vis)\n\n def reward(self):\n if self.goal[0][0] in self.state_dict[self.goal[0][1]]:\n return 1\n return -0.1\n\n def find_and_remove(self, x):\n for item in self.state_dict:\n if item not in self.types:\n for resting in self.state_dict[item]:\n if resting == x:\n self.state_dict[item].remove(x)\n\n def find_top(self, x):\n if len(self.state_dict[x]) == 0:\n return x\n return self.find_top(self.state_dict[x][0])\n\n def pick(self, x):\n if x in self.state_dict['movables'] and len(self.state_dict[x]) == 0:\n if 
len(self.state_dict[self.left_gripper]) == 0:\n self.find_and_remove(x)\n self.state_dict[self.left_gripper].append(x)\n elif len(self.state_dict[self.right_gripper]) == 0:\n self.find_and_remove(x)\n self.state_dict[self.right_gripper].append(x)\n self.save_visual()\n return self.innerese_and_english(), self.reward()\n\n def place(self, x, y):\n if x in self.state_dict[self.left_gripper] +\\\n self.state_dict[self.right_gripper] and y not in\\\n self.state_dict[self.left_gripper] +\\\n self.state_dict[self.right_gripper]:\n if y in self.state_dict['immovables']:\n self.find_and_remove(x)\n self.state_dict[y].append(x)\n if y in self.state_dict['movables']:\n if len(self.state_dict[y]) == 0:\n self.find_and_remove(x)\n self.state_dict[y].append(x)\n self.save_visual()\n return self.innerese_and_english(), self.reward()\n\n\nclass SimulatorVisualizer:\n\n @staticmethod\n def visualize(simulator_visual_file):\n vis = None\n while True:\n time.sleep(0.1)\n if vis is not None:\n print('\\033[K\\033[A'*(vis.count('\\n')+2))\n vis = open(os.path.dirname(\n os.path.abspath(__file__)) + '/../'\n + simulator_visual_file, 'r').read()\n print(vis)\n\n\nclass Pick(Atom):\n def __init__(self, name):\n super(Pick, self).__init__(name, arguments=['t'])\n\n def function(self, args):\n _, _, recovered_english = self.sal.learner.merge_module.parse(\n args[0])\n problem_state_and_goal, reward =\\\n self.sal.problems.active_problem.pick(InnereseAndEnglish(\n args[0], recovered_english))\n # Don't erase info about the problem that is not about the state of the\n # simulator.\n for production in self.sal.problem_state_and_goal:\n if production.innerese.\\\n startswith('( remember i'):\n problem_state_and_goal.append(production)\n return problem_state_and_goal, reward\n\n def innerese_and_english(self, args):\n innerese = '( ' + self.name + ' ' + args[0] + ' )'\n _, _, recovered_english = self.sal.learner.merge_module.parse(\n innerese)\n return InnereseAndEnglish(innerese, recovered_english)\n\n\nclass Place(Atom):\n def __init__(self, name):\n super(Place, self).__init__(name, arguments=['t', 't'])\n\n def function(self, args):\n _, _, recovered_english_0 = self.sal.learner.merge_module.parse(\n args[0])\n _, _, recovered_english_1 = self.sal.learner.merge_module.parse(\n args[1])\n problem_state_and_goal, reward =\\\n self.sal.problems.active_problem.place(InnereseAndEnglish(\n args[0], recovered_english_0), InnereseAndEnglish(\n args[1], recovered_english_1))\n # Don't erase info about the problem that is not about the state of the\n # simulator.\n for production in self.sal.problem_state_and_goal:\n if production.innerese.\\\n startswith('( remember i'):\n problem_state_and_goal.append(production)\n return problem_state_and_goal, reward\n\n def innerese_and_english(self, args):\n innerese = '( ' + self.name + ' ( on ' + args[0] + ' '\\\n + args[1] + ' ) )'\n _, _, recovered_english = self.sal.learner.merge_module.parse(\n innerese)\n return InnereseAndEnglish(innerese, recovered_english)\n\n\nclass ManipulationProblems(Problems):\n def __init__(self):\n super(Problems, self).__init__()\n\n self.simulator_visual_file = '.' 
+ str(randint(10000, 99999)) + '.txt'\n print('Simulator visual file:', self.simulator_visual_file)\n atexit.register(lambda: os.remove(\n os.path.dirname(os.path.abspath(__file__))\n + '/../' + self.simulator_visual_file))\n\n tire = InnereseAndEnglish('tire', 'tire')\n oil_jug = InnereseAndEnglish('( oil jug )', 'oil jug')\n workbench = InnereseAndEnglish('workbench', 'workbench')\n stool = InnereseAndEnglish('stool', 'stool')\n left_gripper = InnereseAndEnglish('( left gripper )', 'left gripper')\n right_gripper = InnereseAndEnglish('( right gripper )',\n 'right gripper')\n self.problems = {\n\n 'train': [{\n 'movables': [tire, oil_jug],\n tire: [],\n oil_jug: [],\n 'immovables': [workbench, stool],\n workbench: [oil_jug],\n stool: [tire],\n left_gripper: [],\n right_gripper: []},\n [[oil_jug, stool], InnereseAndEnglish(\n '( want i ( on ( oil jug ) stool ) )',\n 'i want oil jug on stool')]]}\n\n def choice(self, name):\n self.active_problem = Simulator(copy.deepcopy(self.problems[name][0]),\n copy.deepcopy(self.problems[name][1]),\n self.simulator_visual_file)\n return (self.active_problem.innerese_and_english(),\n self.active_problem.reward())\n\n\nclass ManipulationAtoms(Atoms):\n def __init__(self):\n self.atoms = {\n 'place': Place('place'),\n 'pick': Pick('pick'),\n 'externalize': Externalize('externalize'),\n 'internalize': Internalize('internalize'),\n 'forget': Forget('forget'),\n 'remember': Remember('remember'),\n 'then': Then('then')}\n" }, { "alpha_fraction": 0.4684866964817047, "alphanum_fraction": 0.4868554174900055, "avg_line_length": 47.63934326171875, "blob_id": "45ab504c02a8fa2f8438cf1e35f9842b211f5674", "content_id": "7a46c135fed8af711d2f1c7df895dd2a9136f971", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5935, "license_type": "no_license", "max_line_length": 79, "num_lines": 122, "path": "/trained_models/analysis.py", "repo_name": "TristanThrush/sal", "src_encoding": "UTF-8", "text": "def exact_p_value(independents, observation):\n def all_binary_combinations(independents):\n probabilities = []\n if len(independents) == 1:\n probabilities.append((independents[0][0], [independents[0][1]]))\n probabilities.append(\n ((1 - independents[0][0]), ['not ' + independents[0][1]]))\n else:\n for probability in all_binary_combinations(independents[1:]):\n probabilities.append(\n (independents[0][0] * probability[0],\n [independents[0][1]] + probability[1]))\n probabilities.append(\n ((1 - independents[0][0]) * probability[0],\n ['not ' + independents[0][1]] + probability[1]))\n return probabilities\n probabilities = all_binary_combinations(independents)\n p = 0\n for probability in probabilities:\n if probability[1] == observation:\n extreme = probability[0]\n for probability in probabilities:\n if probability[0] <= extreme:\n p += probability[0]\n return p\n\n\ndef test_generalizability(s):\n import sal\n import copy\n s.learner.G = 1000\n s.learner.GE = 1000\n for word_pair in (('grab', 'set'), ('pluck', 'rest'), ('grip', 'put'),\n ('get', 'leave'), ('acquire', 'deposit'),\n ('grasp', 'lay'), ('clasp', 'position'),\n ('clutch', 'situate'), ('grapple', 'settle'),\n ('take', 'sit'), ('guiltiest', 'phishers'),\n ('98-93', 'kranhold'), ('swineflu', 'fanck'),\n ('toner', 'assizes'), ('titanosaur', 'märjamaa'),\n ('archeparchy', 'grella'), ('bacteroidetes', 'cowered'),\n ('maritza', 'stylinski'), ('siniora', 'maurycy'),\n ('scrophularia', 'attests')):\n s.atoms.atoms['pick'].name = word_pair[0]\n 
s.atoms.atoms['place'].name = word_pair[1]\n s.perform(1, cutoff=2)\n s.atoms.atoms['pick'].name = 'pick'\n s.atoms.atoms['place'].name = 'place'\n replacement = 'oil'\n for word in ('petroleum', 'gasoline', 'petrol', 'fuel',\n 'lubricant', 'grease', 'lubrication', 'kerosene', 'diesel',\n 'napalm', 'ivic', 'showed', 'murigande', 'chelios',\n 'aricie', 'ligule', 'oom', 'fedorchenko', 'haugesund',\n 'compilers'):\n original_state_dict = copy.deepcopy(s.problems.problems['train'][0])\n original_goal = copy.deepcopy(s.problems.problems['train'][1])\n for item in original_state_dict:\n if isinstance(item, sal.InnereseAndEnglish):\n if replacement in item.innerese or replacement in item.english:\n value = original_state_dict[item]\n del s.problems.problems['train'][0][item]\n new_item = sal.InnereseAndEnglish('', '')\n new_item.innerese = item.innerese.replace(replacement,\n word)\n new_item.english = item.english.replace(replacement, word)\n s.problems.problems['train'][0][new_item] = value\n for item in s.problems.problems['train'][0]:\n new_values = []\n for value in s.problems.problems['train'][0][item]:\n new_values.append(sal.InnereseAndEnglish(\n value.innerese.replace(replacement, word),\n value.english.replace(replacement, word)))\n s.problems.problems['train'][0][item] = new_values\n s.problems.problems['train'][1][0][0] = sal.InnereseAndEnglish(\n s.problems.problems['train'][1][0][0].innerese.replace(\n replacement, word),\n s.problems.problems['train'][1][0][0].english.replace(replacement,\n word))\n s.problems.problems['train'][1][0][1] = sal.InnereseAndEnglish(\n s.problems.problems['train'][1][0][1].innerese.replace(\n replacement, word),\n s.problems.problems['train'][1][0][1].english.replace(replacement,\n word))\n s.problems.problems['train'][1][1] = sal.InnereseAndEnglish(\n s.problems.problems['train'][1][1].innerese.replace(replacement,\n word),\n s.problems.problems['train'][1][1].english.replace(replacement,\n word))\n s.perform(1, cutoff=2)\n s.problems.problems['train'][0] = original_state_dict\n s.problems.problems['train'][1] = original_goal\n\n\ndef latexable_innerese_operator_traces(dir):\n import dill\n traces = dill.load(open(dir + '/innerese_operator_traces.b', 'rb'))\n string = ''\n for epoch in traces:\n i = 0\n externalized = False\n while i < len(epoch[1]):\n if epoch[1][i] == 'externalize':\n externalized = True\n i += 1\n elif externalized and epoch[1][i] == 'internalize':\n epoch[1][i] = r'\\{ ' + epoch[1][i] + \\\n ', ' + epoch[1][i - 1] + r' \\}'\n del epoch[1][i - 1]\n elif epoch[1][i].startswith('( then'):\n epoch[1][i] = r'\\{ [ ' + epoch[1][i - 2] + ', ' + epoch[\n 1][i - 1] + ']' + ', ( then ' + epoch[\n 1][i - 2] + ' ' + epoch[1][i - 1] + r' ) \\}'\n del epoch[1][i - 1]\n del epoch[1][i - 2]\n i -= 1\n else:\n i += 1\n string += r'\\scriptsize' + '\\n'\n for operator in epoch[1]:\n string += operator + ', '\n string = string[:-2]\n string += r' \\\\ \\hline ' + '\\n'\n return string\n" }, { "alpha_fraction": 0.5475480556488037, "alphanum_fraction": 0.553361177444458, "avg_line_length": 39.660606384277344, "blob_id": "16bc81adacb70332041d95bfdbb6d8e29cfd3c12", "content_id": "7657fb4f2a1eb4de4ac2cd56c5494a549e586426", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13418, "license_type": "no_license", "max_line_length": 78, "num_lines": 330, "path": "/sal/learner.py", "repo_name": "TristanThrush/sal", "src_encoding": "UTF-8", "text": "import random\nimport gc\nfrom collections import 
namedtuple\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport copy\nimport os\nimport dill\nfrom generator import OperatorGenerator\nif torch.cuda.is_available():\n FloatTensor = torch.cuda.FloatTensor\n LongTensor = torch.cuda.LongTensor\nelse:\n FloatTensor = torch.FloatTensor\n LongTensor = torch.LongTensor\n\n\nTransition = namedtuple('Transition',\n ('innerese_mental_state',\n 'next_innerese_problem_state_and_goal', 'reward'))\n\n\nclass ReplayMemory(object):\n def __init__(self, capacity):\n self.capacity = capacity\n self.memory = []\n self.position = 0\n\n def push(self, *args):\n if len(self.memory) < self.capacity:\n self.memory.append(None)\n self.memory[self.position] = Transition(*args)\n self.position = (self.position + 1) % self.capacity\n\n def sample(self, batch_size):\n return random.sample(self.memory, batch_size)\n\n def __len__(self):\n return len(self.memory)\n\n\nclass EmbeddingsNavigator():\n def __init__(self, embeddings_path):\n self.words_to_indices, self.embeddings =\\\n self.load_embeddings(embeddings_path)\n self.vocab_size, self.embedding_dim = self.embeddings.size()\n\n def load_embeddings(self, embeddings_path):\n lines = open(embeddings_path).readlines()\n words_to_indices = {}\n embeddings = []\n index = 0\n for line in lines:\n line_list = line.split()\n words_to_indices[line_list[0]] = index\n embeddings.append(list(map(lambda line_list_number:\n float(line_list_number),\n line_list[1:])))\n index += 1\n if torch.cuda.is_available():\n # Load into CPU first for speed.\n embeddings = torch.FloatTensor(embeddings).cuda()\n else:\n embeddings = torch.FloatTensor(embeddings)\n return words_to_indices, embeddings\n\n\nclass MergeModule(nn.Module):\n def __init__(self, lstm_hidden_state_size, embeddings_path):\n super(MergeModule, self).__init__()\n self.tokenizer = EmbeddingsNavigator(embeddings_path)\n self.lstm_hidden_state_size = lstm_hidden_state_size\n self.word_embeddings = nn.Embedding(self.tokenizer.vocab_size,\n self.tokenizer.embedding_dim)\n self.word_embeddings.weight.data.copy_(self.tokenizer.embeddings)\n self.word_embeddings.weight.requires_grad = False\n self.lstm = nn.LSTM(\n self.tokenizer.embedding_dim,\n self.lstm_hidden_state_size,\n bidirectional=True)\n self.rescale = nn.Linear(self.lstm_hidden_state_size * 2,\n self.tokenizer.embedding_dim)\n self.memory = {}\n\n def init_hidden(self):\n self.hidden = (Variable(FloatTensor(\n [[[0] * self.lstm_hidden_state_size] * 1] * 2)),\n Variable(FloatTensor(\n [[[0] * self.lstm_hidden_state_size] * 1] * 2)))\n\n def merge(self, constituents):\n if len(constituents) == 1:\n return constituents[0]\n self.init_hidden()\n x = torch.cat(constituents)\n x, self.hidden = self.lstm(x.view(len(constituents), 1, -1),\n self.hidden)\n x = F.max_pool1d(\n torch.t(x.view(len(constituents), -1)).unsqueeze(0),\n kernel_size=len(constituents))\n return self.rescale(x.squeeze().unsqueeze(0))\n\n def parse(self, innerese):\n if innerese in self.memory:\n return self.memory[innerese]\n constituent_set = set()\n constituent_vectors = []\n open_parens = 0\n closed_parens = 0\n constituent = ''\n length_counter = len(innerese)\n recovered_english = []\n for character in innerese:\n length_counter -= 1\n constituent += character\n if character == '(':\n open_parens += 1\n if character == ')':\n closed_parens += 1\n if (character == ' ' or length_counter == 0)\\\n and open_parens == closed_parens:\n if character == ' ':\n 
constituent = constituent[:-1]\n if '(' in constituent:\n constituent_vector, sub_constituent_set,\\\n recovered_sub_english = self.parse(constituent[2:-2])\n constituent_vectors.append(constituent_vector)\n constituent_set |= sub_constituent_set\n recovered_english.append(recovered_sub_english)\n else:\n constituent_index = Variable(LongTensor(\n [self.tokenizer.words_to_indices[constituent]]))\n constituent_vectors.append(\n self.word_embeddings(constituent_index))\n recovered_english.append(constituent)\n constituent_set.add(constituent)\n constituent = ''\n if len(recovered_english) == 3:\n recovered_english = recovered_english[1] + ' ' +\\\n recovered_english[0] + ' ' + recovered_english[2]\n else:\n recovered_english = ' '.join(recovered_english)\n self.memory[innerese] = (self.merge(constituent_vectors),\n constituent_set, recovered_english)\n return self.parse(innerese)\n\n def forward(self, state):\n constituent_vector, self.constituent_set, self.recovered_english =\\\n self.parse('( ' + ' '.join(state) + ' )')\n return constituent_vector.squeeze()[:1]\n\n\nclass Learner:\n def __init__(self, sal, target=False):\n\n self.sal = sal\n\n self.EMBEDDINGS_PATH = os.path.dirname(os.path.abspath(__file__)) + \\\n '/glove.6B.100d.txt'\n self.TARGET_UPDATE = 10\n self.GAMMA = 0.99\n self.G = 500\n self.GE = 2\n self.LR = 0.0003\n self.BATCH_SIZE = 20\n self.REPLAY_MEMORY_SIZE = 200\n self.LSTM_HIDDEN_STATE_SIZE = 100\n\n self.target_counter = 0\n\n self.merge_module = MergeModule(self.LSTM_HIDDEN_STATE_SIZE,\n self.EMBEDDINGS_PATH)\n if torch.cuda.is_available():\n self.merge_module.cuda()\n\n self.optimizer = optim.Adam(filter(\n lambda parameter: parameter.requires_grad,\n list(self.merge_module.parameters())), lr=self.LR)\n\n self.replay_memory = ReplayMemory(self.REPLAY_MEMORY_SIZE)\n\n if not target:\n self.target = Learner(sal, target=True)\n self.target.merge_module.eval()\n\n def generate_and_choose_operator(self, innerese_problem_state_and_goal,\n beam, forced_operator=None,\n return_generations=False):\n\n self.merge_module(innerese_problem_state_and_goal)\n terminals = list(self.merge_module.constituent_set)\n\n if forced_operator is None:\n OperatorGenerator.temporary_off_limit_productions = set()\n operator_and_innerese_mental_state_list = []\n for i in range(beam):\n operator = OperatorGenerator.generate(\n list(map(lambda name: self.sal.atoms.choice(name),\n self.sal.atoms.names())),\n terminals)\n OperatorGenerator.temporary_off_limit_productions.add(\n operator.innerese_and_english().innerese)\n operator_and_innerese_mental_state_list.append((\n operator,\n innerese_problem_state_and_goal\n + ['( will i ' + operator.innerese_and_english().innerese\n + ' )']))\n sorted_generations = sorted(map(\n lambda operator_and_innerese_mental_state:\n (self.merge_module(operator_and_innerese_mental_state[1]),\n operator_and_innerese_mental_state[0]),\n operator_and_innerese_mental_state_list),\n key=lambda item: item[0])\n OperatorGenerator.temporary_off_limit_productions = set()\n if return_generations:\n return sorted_generations\n return sorted_generations[-1]\n else:\n return (self.merge_module(\n innerese_problem_state_and_goal + ['( will i '\n + forced_operator.\n innerese_and_english().\n innerese + ' )']),\n forced_operator)\n\n def operate(self, innerese_problem_state_and_goal, forced_operator=None):\n value_approximation, operator = self.generate_and_choose_operator(\n innerese_problem_state_and_goal, random.choice(\n (self.GE, self.GE, self.G)), forced_operator)\n 
next_innerese_problem_state_and_goal, reward = operator.function()\n self.sal.operator_traces[-1][1].append(operator)\n self.replay_memory.push(\n innerese_problem_state_and_goal + ['( will i '\n + operator.\n innerese_and_english().innerese\n + ' )'],\n list(map(lambda innerese_and_english: innerese_and_english.\n innerese, next_innerese_problem_state_and_goal)),\n FloatTensor([reward]))\n return next_innerese_problem_state_and_goal, reward, operator\n\n def optimize(self):\n\n # Uptate target network, if it is time.\n self.target_counter += 1\n if self.target_counter == self.TARGET_UPDATE:\n self.target.merge_module.load_state_dict(\n self.merge_module.state_dict())\n self.target_counter = 0\n\n # Get and transpose the batch.\n transitions = self.replay_memory.sample(\n min(self.BATCH_SIZE, len(self.replay_memory)))\n batch = Transition(*zip(*transitions))\n\n # Get policy network value estimates for the states.\n innerese_mental_state_values = torch.cat(tuple(map(\n self.merge_module, batch.innerese_mental_state)))\n\n # Get target network value estimates for the next states.\n next_innerese_mental_state_values = torch.cat(\n tuple(\n map(\n lambda next_innerese_problem_state_and_goal:\n self.target.generate_and_choose_operator(\n next_innerese_problem_state_and_goal,\n self.G)[0],\n batch.next_innerese_problem_state_and_goal)))\n\n # Compute the expected values.\n with torch.no_grad():\n reward_batch = Variable(torch.cat(batch.reward))\n expected_innerese_mental_state_values = (\n next_innerese_mental_state_values * self.GAMMA) + reward_batch\n\n # Compute Huber loss.\n loss = F.smooth_l1_loss(\n innerese_mental_state_values,\n expected_innerese_mental_state_values)\n\n # Optimize the model.\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Free memory\n self.merge_module.memory = {}\n self.target.merge_module.memory = {}\n gc.collect()\n\n def save(self, directory):\n converted_replay_memory =\\\n ReplayMemory(self.REPLAY_MEMORY_SIZE)\n [converted_replay_memory.push(\n transition[0], transition[1],\n transition[2].type(torch.FloatTensor))\n for transition in self.replay_memory.memory]\n dill.dump(converted_replay_memory, open(\n directory + '/replay_memory.b', 'wb'))\n torch.save(self.merge_module.state_dict(),\n directory + '/policy_merge_module.pt')\n torch.save(self.target.merge_module.state_dict(),\n directory + '/target_merge_module.pt')\n\n def load(self, directory):\n self.replay_memory = ReplayMemory(self.REPLAY_MEMORY_SIZE)\n [self.replay_memory.push(transition[0], transition[1],\n transition[2].type(FloatTensor))\n for transition in dill.load(open(\n directory + '/replay_memory.b', 'rb')).memory]\n if torch.cuda.is_available():\n self.merge_module.load_state_dict(\n torch.load(directory + '/policy_merge_module.pt'))\n self.target.merge_module.load_state_dict(\n torch.load(directory + '/target_merge_module.pt'))\n else:\n self.merge_module.load_state_dict(\n torch.load(directory + '/policy_merge_module.pt',\n map_location=torch.device('cpu')))\n self.target.merge_module.load_state_dict(\n torch.load(directory + '/target_merge_module.pt',\n map_location=torch.device('cpu')))\n\n # Free memory\n self.merge_module.memory = {}\n self.target.merge_module.memory = {}\n gc.collect()\n" } ]
7
valouxis/py-ask
https://github.com/valouxis/py-ask
b7eb0e3e51eb7736180e8c819938c78700656fed
b7858aef0587e3c597619c6c16d60918f604f479
bb32b6b661cbb35f7edc56f7153cc5e9b1000126
refs/heads/master
2022-12-30T20:09:55.663925
2020-10-25T14:50:41
2020-10-25T14:50:41
307,073,359
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.40055760741233826, "alphanum_fraction": 0.43308550119400024, "avg_line_length": 13.94444465637207, "blob_id": "9e6b45541f0fc1a9cf5b89fb9afe79361f1e3d23", "content_id": "cdda93aa487eab746781e8b71d5a0533c076d250", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1084, "license_type": "no_license", "max_line_length": 58, "num_lines": 72, "path": "/p3.py", "repo_name": "valouxis/py-ask", "src_encoding": "UTF-8", "text": "'''\nhttps://edabit.com/challenge/dy3WWJr34gSGRPLee\nMaking a Box\n------------\nCreate a function that creates a box based on dimension n.\n\nExamples\nmake_box(5) ➞ \n#####\n# #\n# #\n# #\n#####\n\nmake_box(3) ➞ \n###\n# #\n###\n\nmake_box(2) ➞ \n##\n##\n\nmake_box(1) ➞ \n#\n\nprint(make_triangle(7)) -->\n #\n # #\n # #\n # #\n # #\n # #\n#############\n\nprint(make_triangle(5)) -->\n #\n # #\n # #\n # #\n#########\n\nprint(make_triangle(1)) -->\n#\n'''\ndef make_box(n):\n s1 = n * '#'\n lst = [s1]\n if n > 1:\n s2 = '#' + (n-2) * ' ' + '#'\n # for i in range(n-2): lst.append(s2)\n # lst.append(s1)\n lst += [s2] * (n-2)\n lst += [s1]\n return '\\n'.join(lst)\n\n# only odd dimensions (height)\ndef make_triangle(n):\n lst = [' '*(n-1) + '#']\n for i in range(n-2): \n lst += [' '*(n-i-2) + '#' + ' '*(2*i+1) + '#']\n if n > 1: lst += ['#'*(2*n-1)]\n return '\\n'.join(lst)\n\n# test\nprint(make_box(7))\nprint(make_box(5))\nprint(make_box(1))\n\nprint(make_triangle(7))\nprint(make_triangle(5))\nprint(make_triangle(1))\n" }, { "alpha_fraction": 0.30000001192092896, "alphanum_fraction": 0.30000001192092896, "avg_line_length": 5.666666507720947, "blob_id": "e52b2837443d37df803d3182c69904910cf009e1", "content_id": "288031b4eb3381a3ac34eba8108610d4c9b07b42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 7, "num_lines": 3, "path": "/readme.md", "repo_name": "valouxis/py-ask", "src_encoding": "UTF-8", "text": "---\nread_me\n-------\n" }, { "alpha_fraction": 0.595111608505249, "alphanum_fraction": 0.6259298324584961, "avg_line_length": 20.88372039794922, "blob_id": "fd58b6d4dc15524c51944d780b9b0e3aebb277eb", "content_id": "be38455b2b19becc09178f83d26c136a7c80f6f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 947, "license_type": "no_license", "max_line_length": 66, "num_lines": 43, "path": "/p6.py", "repo_name": "valouxis/py-ask", "src_encoding": "UTF-8", "text": "'''\nhttps://edabit.com/challenge/QN4RMpAnktNvMCWwg\nIdentity Matrix\n---------------\nAn identity matrix is defined as a square matrix with 1s running \nfrom the top left of the square to the bottom right. \nThe rest are 0s. \n\nCreate a function that takes an integer n and returns \nthe identity matrix of n x n dimensions. \nFor this challenge, if the integer is negative, return the mirror \nimage of the identity matrix of n x n dimensions. 
\n\nExamples\nid_mtrx(2) ➞ [\n  [1, 0],\n  [0, 1]\n]\nid_mtrx(-2) ➞ [\n  [0, 1],\n  [1, 0]\n]\nid_mtrx(0) ➞ []\n\nNotes\nIncompatible types passed as n should return the string \"Error\".\n'''\ndef id_mtrx(n):\n    if not type(n) is int: return('Error')\n    lst = []\n    n1 = abs(n)\n    for i in range(n1):\n        row = [0]*n1\n        if n > 0: row[i] = 1 \n        else: row[n1-i-1] = 1 \n        lst.append(row)\n    return lst\n\nprint(id_mtrx(2))\nprint(id_mtrx(-2))\nprint(id_mtrx(0))\nprint(id_mtrx('0'))\nprint(id_mtrx(1.2))\n" }, { "alpha_fraction": 0.5174887776374817, "alphanum_fraction": 0.5766816139221191, "avg_line_length": 23.77777862548828, "blob_id": "a4dd3ebf3cd235e295b4b2399c9f9a00ce4d7fcf", "content_id": "fcb07cf1669b4cabc0e861171dd7594ea9a96ab2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1121, "license_type": "no_license", "max_line_length": 76, "num_lines": 45, "path": "/p1.py", "repo_name": "valouxis/py-ask", "src_encoding": "UTF-8", "text": "'''\nhttps://edabit.com/challenge/GAbxxcsKoLGKtwjRB\nSum of Prime Numbers\n--------------------\nCreate a function that takes a list of numbers and returns \nthe sum of all prime numbers in the list.\n\nExamples\nsum_primes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) ➞ 17\nsum_primes([2, 3, 4, 11, 20, 50, 71]) ➞ 87\nsum_primes([]) ➞ None\n\nNotes\n    List elements are always greater than 0.\n    A prime number is a number which has exactly two divisors.\n'''\ndef is_prime(n):\n    if n <= 1: return False\n    if n == 2: return True\n    if n % 2 == 0:\n        return False\n    for i in range(3, int(n**0.5)+1, 2):\n        if n % i == 0:\n            return False\n    return True\n\ndef sum_primes(lst):\n    plst = []\n    for x in lst:\n        if is_prime(x):\n            plst.append(x)\n    \n    if len(plst): return sum(plst)\n    else: return None\n\n# def sum_primes(lst):\n# \tisprime = lambda n: n > 1 and all(n % i for i in range(2, int(n**0.5)+1))\n# \treturn sum(n for n in lst if isprime(n)) or None\n\n# test\nprint(sum_primes([2]))\nprint(is_prime(2))\n\nprint(sum_primes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))\nprint(sum_primes([2, 3, 4, 11, 20, 50, 71]))\n" }, { "alpha_fraction": 0.6019047498703003, "alphanum_fraction": 0.6942856907844543, "avg_line_length": 27.37837791442871, "blob_id": "87d16c2263253f4756ad81bd73a7d33d446be3bd", "content_id": "eb803bcd223d082a7a704dd9661433de532721f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1056, "license_type": "no_license", "max_line_length": 90, "num_lines": 37, "path": "/p5.py", "repo_name": "valouxis/py-ask", "src_encoding": "UTF-8", "text": "'''\nhttps://edabit.com/challenge/Xkc2iAjwCap2z9N5D\nFriday the 13th\n---------------\nGiven the month and year as numbers, return whether that month \ncontains a Friday 13th.\n\nExamples\nhas_friday_13(3, 2020) ➞ True\nhas_friday_13(10, 2017) ➞ True\nhas_friday_13(1, 1985) ➞ False\n\nNotes\n    January will be given as 1, February as 2, etc ...\n    Check Resources for some helpful tutorials on Python's datetime module.\n'''\nimport datetime\ndef has_friday_13(month, year):\n    d = datetime.date(year, month, 13)\n    return d.weekday() == 4\n\n# Given the year, return how many months contain a Friday 13th.\ndef how_many_friday_13(year):\n    return [has_friday_13(m, year) for m in range(1, 13)].count(True)\n\n# Given the century, return ...\n# def year_max_friday_13(century):\n    # return max([(how_many_friday_13(y),y) for y in range(century*100, (century+1)*100)])\n\n# test\nprint(has_friday_13(3, 2020))\nprint(has_friday_13(10, 2017))\nprint(has_friday_13(1, 
1985))\n\nprint(how_many_friday_13(2020))\nprint(how_many_friday_13(2017))\nprint(how_many_friday_13(1986))\n" }, { "alpha_fraction": 0.6145198941230774, "alphanum_fraction": 0.6604215502738953, "avg_line_length": 26.02531623840332, "blob_id": "d25a04d8e3d3c1464cbd623dd203da5218e8e5fd", "content_id": "fd1eb151707398218e9180a47c904965d00b8360", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2147, "license_type": "no_license", "max_line_length": 70, "num_lines": 79, "path": "/p4.py", "repo_name": "valouxis/py-ask", "src_encoding": "UTF-8", "text": "'''\nhttps://edabit.com/challenge/BfSj2nBc33aCQrbSg\nTruncatable Primes\n------------------\nA left-truncatable prime is a prime number that contains no 0 \ndigits and, when the first digit is successively removed, \nthe result is always prime.\n\nA right-truncatable prime is a prime number that contains no 0 \ndigits and, when the last digit is successively removed, \nthe result is always prime.\n\nCreate a function that takes an integer as an argument and:\n If the integer is only a left-truncatable prime, return \"left\".\n If the integer is only a right-truncatable prime, return \"right\".\n If the integer is both, return \"both\".\n Otherwise, return False.\n\nExamples\ntruncatable(9137) ➞ \"left\"\n# Because 9137, 137, 37 and 7 are all prime.\n\ntruncatable(5939) ➞ \"right\"\n# Because 5939, 593, 59 and 5 are all prime.\n\ntruncatable(317) ➞ \"both\"\n# Because 317, 17 and 7 are all prime and 317, 31 and 3 are all prime.\n\ntruncatable(5) ➞ \"both\"\n# The trivial case of single-digit primes is treated as truncatable \n# from both directions.\n\ntruncatable(139) ➞ False\n# 1 and 9 are non-prime, so 139 cannot be truncatable from \n# either direction.\n\ntruncatable(103) ➞ False\n# Because it contains a 0 digit (even though 103 and 3 are primes).\n'''\ndef is_prime(n):\n if n <= 1: return False\n if n == 2: return True\n if n % 2 == 0:\n return False\n for i in range(3, int(n**0.5)+1, 2):\n if n % i == 0:\n return False\n return True\n\ndef truncatable(n):\n if not is_prime(n): return False\n strn = str(n)\n\n if '0' in strn: return False\n\n is_left = True\n for i in range(len(strn)-1):\n if not is_prime(int(strn[i+1:])): \n is_left = False\n break\n\n is_right = True\n for i in range(len(strn)-1):\n if not is_prime(int(strn[:i+1])): \n is_right = False\n break\n\n if is_left and is_right: return 'both'\n elif is_left: return 'left'\n elif is_right: return 'right'\n else: return False\n\n# test\nprint(truncatable(9137))\nprint(truncatable(5939))\nprint(truncatable(317))\nprint(truncatable(5))\nprint(truncatable(139))\nprint(truncatable(103))\n" } ]
6
Loksly/bdge
https://github.com/Loksly/bdge
3233a356ff2b03b96b4020629b23b59f8d238b89
6b8e3beba1fe7f46b46b295896c4694027481daa
2fd4828cf25d7e2a2c693269485bc77e987727c8
refs/heads/master
2020-06-30T04:00:45.666723
2017-01-24T12:02:13
2017-01-24T12:02:13
74,393,022
0
0
null
2016-11-21T18:24:38
2016-09-06T12:00:53
2016-11-18T14:57:31
null
[ { "alpha_fraction": 0.6241709589958191, "alphanum_fraction": 0.6315401792526245, "avg_line_length": 21.36206817626953, "blob_id": "439d6110da4a8c956065624cb39e1d803e057b97", "content_id": "1faa4355f50f1cd4a1185ff413196761a537c353", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1357, "license_type": "permissive", "max_line_length": 177, "num_lines": 58, "path": "/practica2/UpdateScriptSize.py", "repo_name": "Loksly/bdge", "src_encoding": "UTF-8", "text": "import mysql.connector\r\nfrom CodeCount import CodeCount\r\n\r\n\"\"\"\r\n\tCREATE TABLE IF NOT EXISTS `postStats` (\r\n\t`Id` int(11) NOT NULL auto_increment,\r\n\t`postId` INT NOT NULL,\r\n\t`codeLength` INT NOT NULL,\r\n\t`length` INT NOT NULL, PRIMARY KEY (`Id`));\r\n\r\n\"\"\"\r\n\r\ncnx = mysql.connector.connect(user='root', password='root',\r\n host='127.0.0.1',\r\n database='stackoverflow')\r\n\r\n\r\ncursor = cnx.cursor()\r\n\r\nquery = (\"SELECT Id, Body, Score, PostTypeId, FavoriteCount FROM Posts WHERE Id not in (select postId from new_posts_meta)\")\r\n\r\n\r\ncursor.execute(query)\r\n\r\np = [] \r\n\r\nfor (id, body, score, postTypeId, favCount) in cursor:\r\n\tc = CodeCount()\r\n\tbody = unicode(body)\r\n\tc.run(body)\r\n\t\r\n\tratio = 0\r\n\tif (c.nocode_length + c.code_length >0):\r\n\t\tratio = c.code_length / float(c.nocode_length + c.code_length )\r\n\r\n\trow = []\r\n\trow.append(id)\r\n\trow.append(c.nocode_length)\r\n\trow.append(c.code_length)\r\n\trow.append(ratio)\r\n\trow.append(score)\r\n\trow.append(postTypeId)\r\n\trow.append(favCount)\r\n\r\n\tp.append( row )\r\n\t\r\n\r\ncq = cnx.cursor()\r\ninsertquery = (\"insert into new_posts_meta (postId, plaintext_nocode_length, plaintext_code_length, code_text_ratio, score, postTypeId, favCount) values (%s,%s,%s,%s,%s,%s,%s)\")\r\ncq.executemany(insertquery, p)\r\ncq.close()\r\n\r\ncursor.close()\r\n\r\ncnx.commit()\r\n\r\n\r\ncnx.close()\r\n\r\n" }, { "alpha_fraction": 0.6879968047142029, "alphanum_fraction": 0.7054848670959473, "avg_line_length": 29.670732498168945, "blob_id": "41c1494142bbfc980c915945c7dbd07d57e36a6a", "content_id": "f2ebbae9bb394f942eeed0654f7724d0d5259f88", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2526, "license_type": "permissive", "max_line_length": 261, "num_lines": 82, "path": "/neo4j/README.md", "repo_name": "Loksly/bdge", "src_encoding": "UTF-8", "text": "## Neo4j\n\n\n\n### EJERCICIO: Construir los nodos :Tag para cada uno de los tags que aparecen en las preguntas. Construir las relaciones ```post-[:TAGGED]->tag``` para cada tag y también ```tag-[:TAGS]->post```\n\nPara ello, buscar en la ayuda las construcciones WITH y UNWIND y las funciones replace() y split() de Cypher. 
La siguiente consulta debe retornar 1192 resultados:\n\n```neo4j\nMATCH p=(t:Tag)-[:TAGS]->(:Question)\n\tWHERE t.name =~ \"^java$|^c\\\\+\\\\+$\" RETURN count(p);\n```\n\n\n#### Borrar las Tags:\n```neo4j\nMATCH (n: Tag)\n\tDETACH DELETE n\n;\n```\n\n#### Definir sus restricciones:\n```neo4j\nCREATE CONSTRAINT ON (t:Tag)\n\tASSERT t.name IS UNIQUE;\n```\n\n#### Solución:\n```neo4j\nMATCH ( c:Question )\n\tUNWIND SPLIT(replace(c.Tags,\">\", \"<\"), \"<\") as tagC\n\t\tWITH distinct tagC as tag, c as pregunta\n\t\tMERGE (t:Tag {name: tag })\n\t\tMERGE (t)-[:TAGS]->(pregunta)\n\t\tMERGE (pregunta)-[:TAGGED]->(t)\n;\n```\nAmpliamente mejorable el _split_, pero válido.\n\n```text\nAdded 874 labels, created 874 nodes, set 874 properties, created 46218 relationships, statement executed in 23598 ms.\n```\n\n\nLa siguiente consulta muestra los usuarios que preguntan por cada Tag:\n\n```neo4j\nMATCH (t:Tag)-->(:Question)<--(u:User) RETURN t.name,collect(distinct u.Id) ORDER BY t.name;\n```\n\nEl mismo MATCH se puede usar para encontrar qué conjunto de tags ha usado cada usuario cambiando lo que retornamos:\n\n```neo4j\nMATCH (t:Tag)-->(:Question)<--(u:User) RETURN u.Id,collect(distinct t.name) ORDER BY toInt(u.Id);\n```\n\n\n### EJERCICIO: Relacionar cada usuario con los tags de sus preguntas a través de la relación _:INTERESTED_IN_.\n\n```neo4j\nMATCH (u: User)-[:WROTE]->(:Question)-[:TAGGED]->(label: Tag)\n\tMERGE (u)-[:INTERESTED_IN]->(label);\n```\n\nSalida:\n```text\nCreated 12327 relationships, statement executed in 381 ms.\n```\n\n\n### EJERCICIO: Recomendar a los usuarios tags sobre los que podrían estar interesados en base a tags en los que los usuarios con los que están relacionados con _:RECIPROCATE_ están interesados y ellos no, ordenado por número de usuarios interesados en cada tag.\n\n```neo4j\nMATCH (usuario1: User)-[:RECIPROCATE]->(usuario2: User)-[:INTERESTED_IN]->(etiqueta)\n\tWHERE NOT (usuario1)-[:INTERESTED_IN]->(etiqueta)\n\tWITH etiqueta as etiqueta, usuario1 as usuario\n\tMATCH(interesados)-[:INTERESTED_IN]->(etiqueta)\n\t\tWITH etiqueta as etiqueta, usuario as u, count(interesados) as inter\n\t\tORDER BY inter desc\n\t\tRETURN u as idusuario, COLLECT(distinct etiqueta.name) AS recomendaciones\n```\n[Ejemplo de salida en CSV](./recomendaciones.csv)\n\n" }, { "alpha_fraction": 0.4684866964817047, "alphanum_fraction": 0.514424741268158, "avg_line_length": 17.319276809692383, "blob_id": "826024efc3b89f5598b39ba843784c8f07b83d76", "content_id": "893029c233acc0cee2c811d35f6a2051562366b8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2880, "license_type": "permissive", "max_line_length": 88, "num_lines": 166, "path": "/practica4/README.md", "repo_name": "Loksly/bdge", "src_encoding": "UTF-8", "text": "\n\n### RQ1:\n\n#### Framework de agregación:\n\n```mongodb\ndb.posts.aggregate([\n\t{$match: { PostTypeId: 1} },\n\t{$group: {_id: \"$OwnerUserId\", count:{$sum: 1} }},\n\t{$group: { _id: \"$count\", howmany: {$sum: 1}}}\n]);\n```\n\n#### Map/Reduce:\n\n```mongodb\ndb.rq1_.drop();\ndb.rq1.drop();\ndb.posts.mapReduce(\n\tfunction(){\n\t\temit(this.OwnerUserId, 1);\n\t},\n\tfunction(key, values){\n\t\treturn Array.sum(values);\n\t},\n\t{\n\t\tquery:{ PostTypeId: 1 },\n\t\tout: \"rq1_\"\n\t}\n);\ndb.rq1_.mapReduce(\n\tfunction(){\n\t\temit(this.value, 1);\n\t},\n\tfunction(key, values){\n\t\treturn Array.sum(values);\n\t},\n\t{\n\t\tout: \"rq1\"\n\t}\n);\ndb.rq1_.drop();\n```\n\n### RQ2:\n\n#### Framework de 
agregación:\n\n```mongodb\ndb.posts.aggregate([\n\t{$match: { PostTypeId: 2} },\n\t{$group: {_id: \"$OwnerUserId\", count:{$sum: 1} }},\n\t{$group: { _id: \"$count\", howmany: {$sum: 1}}}\n]);\n```\n\n#### Map/Reduce:\n```mongodb\ndb.rq2_.drop();\ndb.rq2.drop();\ndb.posts.mapReduce(\n\tfunction(){\n\t\temit(this.OwnerUserId, 1);\n\t},\n\tfunction(key, values){\n\t\treturn Array.sum(values);\n\t},\n\t{\n\t\tquery:{ PostTypeId: 2 },\n\t\tout: \"rq2_\"\n\t}\n);\ndb.rq2_.mapReduce(\n\tfunction(){\n\t\temit(this.value, 1);\n\t},\n\tfunction(key, values){\n\t\treturn Array.sum(values);\n\t},\n\t{\n\t\tout: \"rq2\"\n\t}\n);\ndb.rq2_.drop();\n```\n\n\n### RQ3:\n\n#### Framework de agregación:\n\n```mongodb\ndb.posts.aggregate([\n\t{\n\t\t'$match':\n\t\t\t{ 'PostTypeId': { '$in': [1, 2] } } },\n\t{\n\t\t'$group': {\n\t\t\t_id: \"$OwnerUserId\",\n\t\t\t'numPreguntas': {'$sum': { '$mod': [\"$PostTypeId\", 2] } } ,\n\t\t\t'numRespuestas': {'$sum': { '$floor': {'$divide': ['$PostTypeId', 2] } } }\n\t\t}\n\t},\n\t{\n\t\t'$project': {\n\t\t\t'percentQuestions': {\n\t\t\t\t'$ceil':\n\t\t\t\t\t{\n\t\t\t\t\t\t$multiply: [\n\t\t\t\t\t\t100,\n\t\t\t\t\t\t\t{ $divide: ['$numPreguntas', {'$add': ['$numPreguntas', '$numRespuestas'] } ] }\n\t\t\t\t\t\t]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t}\n\t},\n\t{$group: { _id: \"$percentQuestions\", howmany: {$sum: 1}}}\n]);\n\n```\n\n\n#### Map/Reduce:\n\n```mongodb\ndb.rq3_.drop();\ndb.rq3.drop();\ndb.posts.mapReduce(\n    function(){\n        var obj = {\n            numPreguntas: (this.PostTypeId === 1) ? 1 : 0,\n            numRespuestas: (this.PostTypeId === 2) ? 1 : 0\n        };\n\n        emit(this.OwnerUserId, obj);\n    },\n    function(key, values){\n        var obj = {\n            numPreguntas: 0,\n            numRespuestas: 0\n        };\n        for(var i = 0, j = values.length; i < j; i++){\n            obj.numPreguntas += values[i].numPreguntas;\n            obj.numRespuestas += values[i].numRespuestas; \n        }\n        return obj;\n    },\n    {\n        query:{ PostTypeId: { $in: [1, 2] } },\n        out: \"rq3_\",\n        finalize: function(key, value){\n            var numPreguntasYRespuestas = value.numPreguntas + value.numRespuestas;\n            return Math.ceil(100 * value.numPreguntas / numPreguntasYRespuestas);\n        }\n    }\n);\ndb.rq3_.mapReduce(\n    function(){\n        emit(this.value, 1);\n    },\n    function(key, values){\n        return Array.sum(values);\n    },\n    {\n        out: \"rq3\"\n    }\n);\n```\n" }, { "alpha_fraction": 0.6814995408058167, "alphanum_fraction": 0.6903383135795593, "avg_line_length": 33.279571533203125, "blob_id": "7dafe595cd9ea93187b19e15dfc00d8671a6dc4b", "content_id": "96b4183be38030c143042a1d0f805ec2ac9a207f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3299, "license_type": "permissive", "max_line_length": 213, "num_lines": 93, "path": "/hbase/README.md", "repo_name": "Loksly/bdge", "src_encoding": "UTF-8", "text": "# HBASE\r\n\r\n\r\n### Shell de hbase\r\n\r\n\r\nEl shell de hbase nos permite la ejecución de comandos sobre el gestor de la base de datos utilizando el propio cliente de línea\r\nde comandos proporcionado por hbase. 
Para entrar en el shell de _hbase_ tan sólo es necesario ejecutar:\r\n\r\n```bash\r\n$ hbase/bin/hbase\r\n```\r\n\r\n## Ejercicios\r\n\r\n\r\n### Mostrar la familia de columnas revision para la entrada ASCII de la tabla wikipedia.\r\n```hbase\r\nhbase> get 'wikipedia', 'ASCII', {COLUMN => 'revision',VERSIONS=>10}\r\n```\r\n\r\n### Mostrar las 20 primeras filas de la tabla wikipedia cuyas columnas empiecen por com.\r\n\r\nCreo que te refieres a esto:\r\n\r\n```hbase\r\nhbase> scan 'wikipedia', {FILTER =>\"ColumnPrefixFilter('com')\", LIMIT => 20}\r\n```\r\n\r\nY no a esto:\r\n\r\n```hbase\r\nhbase> scan 'wikipedia', {STARTROW => 'com', FILTER => \"PrefixFilter('com')\", LIMIT => 20}\r\n```\r\n \r\n### Mostrar las 20 primeras filas de la tabla wikipedia cuyas columnas empiecen por com y la clave de columna empieza por 'B'.\r\n\r\n```hbase\r\nhbase> scan 'wikipedia', {ROWPREFIXFILTER => 'B', FILTER => \"ColumnPrefixFilter('com')\", LIMIT => 20}\r\n```\r\n \r\n### Mostrar sólo la columna revision:author de las filas de la tabla wikipedia cuya clave empiece por a y termine por a (obviando mayúsculas y minúsculas).\r\n```hbase\r\nhbase> scan 'wikipedia', {COLUMNS=>'revision', FILTER => \"PrefixFilter('A')\"}\r\n```\r\n \r\n### Mostrar las filas de la tabla wikipedia cuya clave contenga al menos un número.\r\n```hbase\r\nhbase> scan 'wikipedia', {FILTER => \"RowFilter(=,'regexstring:[0-9]')\"}\r\n```\r\n \r\n### Mostrar las filas de la tabla wikipedia cuyo autor de revisión sea Addbot.\r\n```hbase\r\nhbase> scan 'wikipedia', {FILTER=>\"SingleColumnValueFilter('revision', 'author', =, 'binary:Addbot')\"}\r\n```\r\n \r\n### Mostrar las filas de la tabla wikipedia tales que alguno de sus valores de campos de columnas sea menor que 1.\r\n```hbase\r\nhbase> scan 'wikipedia', {FILTER=>\"ValueFilter(<, 'binary:1')\", LIMIT => 20}\r\n```\r\n \r\n### Mostrar las filas de la tabla users (sólo la columna rawdata:Location) de usuarios de España (se supondrá que su localización (columna rawdata:Location) contiene España o ES, obviando mayúsculas y minúsculas).\r\n```hbase\r\nhbase> scan 'users', {FILTER=>\"SingleColumnValueFilter('rawdata','Location',=,'substring:ES')\"}\r\n```\r\n\r\nOtra opción:\r\n\r\n```hbase\r\nhbase> scan 'users', {FILTER=>\"SingleColumnValueFilter('rawdata','Location',=,'binary:Spain') OR SingleColumnValueFilter('rawdata','Location',=,'binary:España')\"}\r\n```\r\n \r\n### Comparar si hay más usuarios de Santiago de Compostela que de Murcia :).\r\n```hbase\r\nhbase> scan 'users', {FILTER=>\"SingleColumnValueFilter('rawdata','Location',=,'substring:Murcia')\"}\r\n```\r\n\r\n```hbase\r\nhbase> scan 'users', {FILTER=>\"SingleColumnValueFilter('rawdata','Location',=,'substring:Santiago de Compostela')\"}\r\n```\r\n\r\nMurcia: 11\r\nSantiago de Compostela: 5\r\n \r\n### Mostrar las filas de la tabla posts que hacen referencia al tag \"clojure\".\r\n\r\n```hbase\r\nhbase> scan 'posts', {FILTER=>\"SingleColumnValueFilter('rawdata','Tags',=,'substring:<clojure>')\"}\r\n```\r\n \r\n### (opcional): Crear una nueva tabla _poststags_ que, de forma eficiente, para cada tag, liste los Id de los posts que utilizan ese tag.\r\n\r\n\r\n[Source shell commands](https://learnhbase.wordpress.com/2013/03/02/hbase-shell-commands/)\r\n" }, { "alpha_fraction": 0.7084271311759949, "alphanum_fraction": 0.7411853075027466, "avg_line_length": 39.409324645996094, "blob_id": "ae74acb4cdbde0c46ca0477a6ba287ea3e89e8bd", "content_id": "47c42d0eab8f96ddb7fc9bb7d17134cfad8945f8", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8039, "license_type": "permissive", "max_line_length": 232, "num_lines": 193, "path": "/practica2/README.md", "repo_name": "Loksly/bdge", "src_encoding": "UTF-8", "text": "\r\n\r\n### Primer artículo\r\n\r\nMostrar cómo conseguir RQ1, RQ2, RQ3 y RQ4 (tablas y gráficas) del\r\n[artículo](http://ink.library.smu.edu.sg/cgi/viewcontent.cgi?article=2810&context=sis_research),\r\ny ver si también se repite en Stackoverflow en español.\r\n\r\n\r\nBasándonos en la descripción de la semántica de los campos que está disponible en: http://meta.stackexchange.com/a/2678\r\n\r\nPodemos deducir que PostTypeId = 1 cuando se trata de una pregunta y que PostTypeId = 2 cuando se trata de una respuesta, por tanto y como ayuda hacemos esta vista:\r\n```mysql\r\ncreate view UserStats as select OwnerUserId, sum(case when PostTypeId = 1 then 1 else 0 end) as numberofquestions, sum(case when PostTypeId = 2 then 1 else 0 end) as numberofanswers from Posts group by OwnerUserId;\r\n```\r\n\r\n\r\n#### RQ1: What are the distributions of developers that post questions?\r\n\r\n\r\n```mysql\r\nselect numberofquestions, count(*) as count from (select count(*) as numberofquestions from Posts where PostTypeId=1 group by OwnerUserId) t group by numberofquestions;\r\n```\r\n\r\n#### RQ2: What are the distributions of developers that answer questions?\r\n\r\n```mysql\r\nselect numberofanswers, count(*) as count from (select count(*) as numberofanswers from Posts where PostTypeId=2 group by OwnerUserId) t group by numberofanswers;\r\n```\r\n\r\n### RQ3: Do developers that ask questions answer questions too?\r\n\r\n```mysql\r\n\r\nselect ratioofanswers, count(*) as count from (select ceiling(numberofanswers / (numberofanswers + numberofquestions) * 100) as ratioofanswers from UserStats) t group by ratioofanswers;\r\n\r\n\r\n```\r\n\r\n### RQ4: Do developers receiving help returns the favor?\r\n```mysql\r\n\r\ncreate view Answerers as select parent.OwnerUserId, GROUP_CONCAT(child.OwnerUserId) as AnswerersIds from Posts parent, Posts child where parent.Id = child.ParentId and parent.PostTypeId=1 and child.PostTypeId=2 group by OwnerUserId;\r\n\r\n```\r\n# un desarrollador es recíproco si contesta a las preguntas de alguno de los desarrolladores que le han contestado\r\n\r\n#incorrecto:\r\nselect OwnerUserId, exists( select * from Answerers a2 where a1.OwnerUserId in a2.AnswerersIds ) from Answerers a1\r\n\r\n\r\n```\r\n\r\n\r\n### [Segundo artículo](http://flosshub.org/sites/flosshub.org/files/hicssSMFinalWatermark.pdf)\r\n\r\nCuando la memoria es gratis y todo se precalcula e, incluso, se replica.\r\n\r\n```mysql\r\n\tCREATE TABLE IF NOT EXISTS `new_posts_meta` (\r\n\t`Id` int(11) NOT NULL auto_increment,\r\n\t`postId` INT NOT NULL,\r\n\t`plaintext_nocode_length` INT NOT NULL,\r\n\t`plaintext_code_length` INT NOT NULL,\r\n\t`code_text_ratio` FLOAT NOT NULL,\r\n\t`score` INT NOT NULL,\r\n\t`postTypeId` INT NOT NULL,\r\n\t`favcount` INT NOT NULL,\r\n\tPRIMARY KEY (`Id`));\r\n```\r\n\r\n\r\n```bash\r\n$ python --version\r\nPython 2.7.10\r\n$ python UpdateScriptSize.py\r\n```` \r\n\r\n#### Q1. Do higher scoring questions include more source code? 
What about higher scoring answers?\r\n\r\n```mysql\r\nselect code_text_ratio, Score from new_posts_meta inner join Posts on Posts.Id = new_posts_meta.postId limit 10;\r\n```\r\n\r\n```mysql\r\n\r\nselect ceil(count(*) * 0.05) from new_posts_meta where PostTypeId = 1;\r\n/* 352 */\r\n\r\nselect avg(code_text_ratio) from (select code_text_ratio from new_posts_meta where postTypeId = 1 order by Score desc limit 352) t;\r\n/* el 5% de las consultas con más puntuación tienen una proporción de código de 0.331561081944859 */\r\n\r\nselect avg(code_text_ratio) from new_posts_meta where postTypeId = 1;\r\n/* la media de la relación es de: 0.3695720485178994 */\r\n\r\n\r\nselect ceil(count(*) * 0.05) from new_posts_meta where PostTypeId = 2;\r\n/* 447 */\r\n\r\nselect avg(code_text_ratio) from (select code_text_ratio from new_posts_meta where postTypeId = 2 order by Score desc limit 352) t;\r\n/* el 5% de las consultas con más puntuación tienen una proporción de código de 0.31454946994331706 */\r\n\r\nselect avg(code_text_ratio) from new_posts_meta where postTypeId = 2;\r\n/* la media de la relación es de: 0.3902041476015976 */\r\n\r\n\r\n```\r\nLa conclusión es que no en ninguno de los dos casos.\r\n\r\n\r\n#### Q2. Do questions with high favorite counts have more source code?\r\n\r\n```mysql\r\nselect ceil(count(*) * 0.05) from new_posts_meta where PostTypeId = 1;\r\n/* 447 */\r\n\r\nselect avg(code_text_ratio) from (select code_text_ratio from new_posts_meta where postTypeId = 1 order by favcount desc limit 447) t;\r\n/* el 5% de las consultas con más puntuación tienen una proporción de código de 0.369798013959205 */\r\n\r\nselect avg(code_text_ratio) from new_posts_meta where postTypeId = 1;\r\n/* la media de la relación es de: 0.3902041476015976 */\r\n```\r\n\r\nRespuesta: no especialmente.\r\n\r\n#### Q3. 
Do answers chosen as “accepted answers” have more source code than answers not chosen?\r\n\r\nVenga va, un ejemplo con subconsulta.\r\n\r\n```mysql\r\nselect avg(code_text_ratio) from new_posts_meta where postTypeId=2 and postId in (select AcceptedAnswerId from Posts where postTypeId=1 and AcceptedAnswerId is not null );\r\n/* 0.42342953202548905 */\r\n\r\nselect avg(code_text_ratio) from new_posts_meta where postTypeId=2 and postId not in (select AcceptedAnswerId from Posts where postTypeId=1 and AcceptedAnswerId is not null );\r\n/* 0.3723473803111589 */\r\n\r\n```\r\nLa media de código en este caso es superior en las respuestas aceptadas.\r\n\r\n\r\n#### (opcional) El tiempo mínimo y máximo que pasa entre cada pregunta y la primera respuesta (función TIMESTAMPDIFF() de MySQL).\r\n\r\n```mysql\r\nselect MIN( Timestampdiff(second, pregunta.CreationDate, primerarespuesta.CreationDate ) )\r\n\tfrom Posts pregunta,\r\n\t(select MIN(CreationDate) as CreationDate, ParentId from Posts where PostTypeId=2 group by ParentId) primerarespuesta\r\nwhere pregunta.PostTypeId=1 and pregunta.Id = primerarespuesta.ParentId;\r\n/* minimo tiempo, primera respuesta, en segundos: 0 */\r\n\r\nselect MAX( Timestampdiff(second, pregunta.CreationDate, primerarespuesta.CreationDate ) )\r\n\tfrom Posts pregunta,\r\n\t(select MIN(CreationDate) as CreationDate, ParentId from Posts where PostTypeId=2 group by ParentId) primerarespuesta\r\nwhere pregunta.PostTypeId=1 and pregunta.Id = primerarespuesta.ParentId;\r\n/* máximo tiempo, primera respuesta, en segundos: 22230983, unos 257 días */\r\n\r\n```\r\n\r\n\r\n#### (opcional) Usando la tabla PostTags de la sesión anterior, calcular el tiempo medio, mínimo y máximo de la primera respuesta dependiendo del Tag.\r\n```mysql\r\nselect \r\n\tAVG( Timestampdiff(second, pregunta.CreationDate, primerarespuesta.CreationDate ) ) as medio,\r\n\tMIN( Timestampdiff(second, pregunta.CreationDate, primerarespuesta.CreationDate ) ) as minimo,\r\n\tMAX( Timestampdiff(second, pregunta.CreationDate, primerarespuesta.CreationDate ) ) as maximo,\r\n\tTagName\r\n\tfrom Posts pregunta inner join PostTags on pregunta.Id = PostTags.PostId,\r\n\t(select MIN(CreationDate) as CreationDate, ParentId from Posts where PostTypeId=2 group by ParentId) primerarespuesta\r\nwhere pregunta.PostTypeId=1 and pregunta.Id = primerarespuesta.ParentId\r\n\tgroup by TagId;\r\n```\r\n\r\n<!--\r\nC1:\r\n\r\nselect count(*) as number_of_users, times.count from (select count(*) as count from Posts where PostTypeId = 1 group by OwnerUserId) times group by count;\r\n\r\nC2:\r\nselect count(*) as number_of_users, times.count from (select count(*) as count from Posts where PostTypeId = 2 group by OwnerUserId) times group by count;\r\n\r\nC3:\r\nselect * from (select distinct OwnerUserId from Posts where PostTypeId = 1) as Questioners left join (select distinct OwnerUserId from Posts where PostTypeId = 2) as Answerers\r\n\r\ninsert into new_posts_meta_2 (postId, code_text_ratio, Score, rank, percentile, postNumber, prevScore)\r\n\r\nselect PostId, code_text_ratio, Score, @curRank := IF(@prevVal=Score, @curRank, @postNumber) AS rank, \r\n@percentile := IF(@prevVal=Posts.Score, @percentile, (@totalPosts - @postNumber + 1)/(@totalPosts)*100) as percentile,\r\n@postNumber := @postNumber + 1 as postNumber, \r\n@prevVal:=Posts.Score\r\nfrom new_posts_meta inner join Posts on Posts.Id = new_posts_meta.postId, (\r\nSELECT @curRank :=0, @prevVal:=null, @postNumber:=1, @percentile:=100\r\n) r\r\nwhere PostTypeId = 1\r\nORDER BY 
Posts.Score DESC;\r\n\r\n-->\r\n\r\n" }, { "alpha_fraction": 0.624790608882904, "alphanum_fraction": 0.6348408460617065, "avg_line_length": 20.961538314819336, "blob_id": "1f882a673232a17e46a5f3899a3c9d02e8a1371b", "content_id": "609aeebf9e4976468d553f3dd58dfb15ca90b1e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 597, "license_type": "permissive", "max_line_length": 54, "num_lines": 26, "path": "/practica2/CodeCount.py", "repo_name": "Loksly/bdge", "src_encoding": "UTF-8", "text": "from HTMLParser import HTMLParser\r\n\r\nclass CodeCount(HTMLParser):\r\n\r\n\tdef __init__(self):\r\n\t\tHTMLParser.__init__(self)\r\n\t\tself.nocode_length = 0\r\n\t\tself.code_length = 0\r\n\t\tself.inCode = 0\r\n\r\n\tdef run(self, string):\r\n\t\tself.feed(string)\r\n\r\n\tdef handle_starttag(self, tag, attrs):\r\n\t\tif tag == 'code':\r\n\t\t\tself.inCode = self.inCode + 1\r\n\r\n\tdef handle_endtag(self, tag):\r\n\t\tif tag == 'code':\r\n\t\t\tself.inCode = self.inCode - 1\r\n\r\n\tdef handle_data(self, data):\r\n\t\tif self.inCode > 0:\r\n\t\t\tself.code_length = self.code_length + len(data)\r\n\t\telse:\r\n\t\t\tself.nocode_length = self.nocode_length + len(data)\r\n" } ]
6
rfernandez2311/LightWave
https://github.com/rfernandez2311/LightWave
b0cb3a487125533bbb0df5f401ed95d498b51f55
2896c718cca3194bb3774f4cf58c13b914a9bceb
3c1dbe8d8dfc23c7c93fd9376fedb34500eccba1
refs/heads/master
2020-05-05T08:42:26.613830
2019-04-07T17:52:37
2019-04-07T17:52:37
179,874,495
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.52788907289505, "alphanum_fraction": 0.5352850556373596, "avg_line_length": 23.398496627807617, "blob_id": "c9bd9c2ca7e7948c817ce447fe2a845b644b79e2", "content_id": "45e65beb563fe3c501dd9f3c083abeec4a840f23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 3246, "license_type": "no_license", "max_line_length": 155, "num_lines": 133, "path": "/Web_App/dashboard.php", "repo_name": "rfernandez2311/LightWave", "src_encoding": "UTF-8", "text": "<?php\n require_once \"dbconnect.php\";\n?>\n\n<!DOCTYPE html>\n<html>\n<head>\n <meta charset=\"utf-8\" />\n <meta http-equiv=\"refresh\" content=\"15\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n <title>LightWave</title>\n \n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n\n <link rel=\"stylesheet\" href=\"main.scss\" >\n <link href=\"https://stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css\" rel=\"stylesheet\" integrity=\"sha384-wvfXpqpZZVQGK6TAh5PVlGOfQNHSoD2xbE+QkPxCAFlNEevoEH3Sl0sibVcOQVnN\" crossorigin=\"anonymous\">\n <link href=\"https://fonts.googleapis.com/icon?family=Material+Icons\" rel=\"stylesheet\">\n\n <script src=\"main.js\"></script>\n <script async defer src= \"https://buttons.github.io/buttons.js\"></script>\n </head>\n <body>\n\n \n \n <div id=\"sideMenu\"> \n \n \n <a href=\"dashboard.php\" ><img src=\"lightwave.png\" alt=\"Lightwave Logo\"></a>\n\n <nav>\n\n <a href=\"dashboard.php\" class=\"active\"> <i class=\"fa fa-home\" aria-hidden=\"true\"></i> Home</a>\n <a href=\"about.php\" > <i class=\"fa fa-user\" aria-hidden=\"true\"></i>About Us</a>\n <a href=\"liveinfo.php\"><i class=\"fa fa-thermometer-half\" aria-hidden=\"true\"></i>Live Mesures</a>\n\n </nav>\n </div>\n\n\n <header>\n\n <div class=\"search-area\">\n <i class=\"fa fa-search\" aria-hidden=\"true\"></i>\n <input type=\"text\" name=\"\" value=\"\">\n </div>\n <div class=\"user-area\">\n\n \n </div>\n </header>\n\n \n <div class=\"flex-container\">\n\n <!--<div class=\"box0\">\n <p> Today's Time and Date</p>\n <img src=\"clock.png\"></img> \n <time id=\"demo\"></time>\n </div>-->\n\n\n\n\n <div class=\"box1\">\n <p class=\"title\">TEMPERATURE</p>\n <img src=\"temp.png\"></img> \n <?php\n \n\n $query = \"SELECT temp FROM Live_Info;\";\n \n $result = mysqli_query($connection,$query);\n \n while ($row = mysqli_fetch_array($result)){\n echo \"<p>\".$row['temp'].\"°C\".\"</p>\"; \n }\n ?> \n </div>\n \n <div class=\"box2\">\n <p class=\"title2\">HUMIDITY</p>\n <img src=\"drop.png\"></img> \n <?php\n $query = \"SELECT hum FROM Live_Info;\";\n \n $result = mysqli_query($connection,$query);\n \n while ($row = mysqli_fetch_array($result)){\n echo \"<p>\".$row['hum'].\"%\".\"</p>\"; \n }\n ?> \n </div>\n\n <div class=\"box3\">\n <p class=\"title3\">ALARM</p>\n <img src=\"alarm.png\"></img> \n <?php\n $query = \"SELECT fire_state, gas_state FROM Actuators;\";\n \n $result = mysqli_query($connection,$query);\n \n while ($row = mysqli_fetch_array($result)){\n if($row['fire_state'] == \"0\" || $row['gas_state'] == \"1\"){\n echo \"<p class=\\\"action2\\\">ON</p>\";\n }\n else{\n echo \"<p class=\\\"action1\\\">OFF</p>\";\n }\n }\n ?>\n </div>\n\n \n\n\n\n <div class=\"box5\">\n <img src=\"stark.jpg\" >\n <h1>Tony Stark</h1>\n <p class=\"title\">CEO & Founder, Stark Industries</p>\n <p class=\"stu\"> Interamerican University - Bayamon</p>\n <div class=\"social\" style=\"margin: 30px 0;\">\n <a href=\"#\"><i class=\"fa 
fa-dribbble\"></i></a> \n <a href=\"#\"><i class=\"fa fa-twitter\"></i></a> \n <a href=\"#\"><i class=\"fa fa-linkedin\"></i></a> \n <a href=\"#\"><i class=\"fa fa-facebook\"></i></a> \n </div>\n \n </div>\n\n </body>\n</html>\n" }, { "alpha_fraction": 0.6239316463470459, "alphanum_fraction": 0.6239316463470459, "avg_line_length": 21.399999618530273, "blob_id": "43499393f6c7d2bd00ba5bfd1c921b89f2ece73a", "content_id": "9a29e3eeff1e3be8d54c144855ad8811b5278845", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 468, "license_type": "no_license", "max_line_length": 58, "num_lines": 20, "path": "/Web_App/registro.php", "repo_name": "rfernandez2311/LightWave", "src_encoding": "UTF-8", "text": "<?php\r\n\r\ninclude \"dbconnect.php\";\r\n\r\n$user_name = $_POST[\"Nombre\"];\r\n$user_lastname=$_POST[\"LastName\"];\r\n$user_username=$_POST[\"usuario\"];\r\n$user_password= $_POST[\"userpassword\"];\r\n$mysql_query = \"insert into Users (username,password) \r\nvalues ('$user_username','$user_password')\";\r\n\r\nif ($connection ->query($mysql_query)===TRUE){\r\n echo \"Insert Successful\";\r\n}\r\nelse{\r\n echo \"Error: \".$mysql_query.\"<br>\".$connection->error;\r\n}\r\n$connection -> close();\r\n\r\n?>\r\n" }, { "alpha_fraction": 0.6742424368858337, "alphanum_fraction": 0.6742424368858337, "avg_line_length": 32, "blob_id": "a1e251f92ffaad20e8c8580626c332efeb2b0773", "content_id": "dac9150c1567c9612a476a6bf860dd361f1fc644", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 264, "license_type": "no_license", "max_line_length": 76, "num_lines": 8, "path": "/Web_App/dbconnect.php", "repo_name": "rfernandez2311/LightWave", "src_encoding": "UTF-8", "text": "<?php\n define('DB_SERVER', 'localhost');\n define('DB_USERNAME', 'pi');\n define('DB_PASSWORD', '0000');\n define('DB_DATABASE', 'LightWave');\n $connection = mysqli_connect(DB_SERVER,DB_USERNAME,DB_PASSWORD,DB_DATABASE)\n\t or die ('Error Connecting to DB!');\n?>\n" }, { "alpha_fraction": 0.5854724049568176, "alphanum_fraction": 0.6018568873405457, "avg_line_length": 19.344444274902344, "blob_id": "0f51efa276555aa934a61d9116b30e0be3199055", "content_id": "eade64f0b8bda3a702cb097d279037e02b7f76c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3662, "license_type": "no_license", "max_line_length": 71, "num_lines": 180, "path": "/LightWave_Sensors_Code/LightWave_Sensors_Code.ino", "repo_name": "rfernandez2311/LightWave", "src_encoding": "UTF-8", "text": "#include <dht.h>\n#include <Wire.h>\n#include <avr/wdt.h>\n\n\n/* Varibale and Port Declaration*/\n#define lightSensorPort A0\n#define dhtSensorPort A1\n#define gasSensorPin A2\n#define flameSensorPort 3\n\n#define SLAVE_ADDRESS 0x04\n\nint temperature_read, humidity_read;\nint lightSensorValue, lightMeasureMap;\nint flameDetection;\nint gasDetection;\nint wireReadValue;\nchar state = 'R';\nint lastState = 1;\nint i;\nbyte gas_ppm[2];\n\n// the setup function runs once when you press reset or power the board\nvoid setup() {\n\n // initialize serial communication at 9600 bits per second:\n Serial.begin(9600);\n\n pinMode(flameSensorPort, INPUT);\n\n // initialize i2c as slave\n Wire.begin(SLAVE_ADDRESS);\n\n // define callbacks for i2c communication\n Wire.onReceive(receiveData);\n Wire.onRequest(sendData);\n\n Serial.println(\"***************************************\");\n Serial.println(\"* Welcome to Your LightWave System *\");\n 
Serial.println(\"* Enter R to Resume and S to Stop *\");\n Serial.println(\"***************************************\");\n Serial.println();\n\n}\n\nvoid loop()\n{\n state = Serial.read();\n\n if (state == 'R' || state == 'r') {\n lastState = 1;\n }\n else if (state == 'S' || state == 's') {\n lastState = 2;\n }\n\n if (lastState == 1) {\n ReadLight();\n TempAndHumid();\n FlameDetection();\n GasDetection();\n PrintData();\n Serial.println();\n }\n\n else if (lastState == 2) {\n Serial.println(\"************************************************\");\n Serial.println(\"* SYSTEM IS IDLE *\");\n Serial.println(\"************************************************\");\n }\n else {\n Serial.println(\"* PLEASE ENTER VALID COMMAND *\");\n }\n delay(2000);\n}\nvoid receiveData(int byteCount) {\n\n while (Wire.available()) {\n wireReadValue = Wire.read();\n }\n}\n\nvoid sendData() {\n\n switch (wireReadValue) {\n case 1:\n Wire.write((byte)temperature_read);\n break;\n case 2:\n Wire.write((byte)humidity_read);\n break;\n case 3:\n Wire.write((byte)lightMeasureMap);\n break;\n case 4:\n Wire.write((byte)flameDetection);\n break;\n case 5:\n gas_ppm[0] = ((byte)gasDetection >> 8) & 0xFF;\n Wire.write(gas_ppm[0]);\n break;\n case 6:\n gas_ppm[1] = ((byte)gasDetection) & 0xFF;\n Wire.write(gas_ppm[1]);\n break;\n case 7:\n Wire.write((byte)lastState);\n break;\n }\n\n \n}\n\nvoid ReadLight() {\n\n lightSensorValue = analogRead(lightSensorPort);\n lightMeasureMap = map(lightSensorValue, 0, 690, 0, 100);\n if (lightMeasureMap > 100) {\n lightMeasureMap = 100;\n }\n\n}\n\nvoid TempAndHumid() {\n\n dht tempAndHumid;\n\n /* save temperature and humidity reading to variables */\n tempAndHumid.read11(dhtSensorPort);\n temperature_read = tempAndHumid.temperature;\n humidity_read = tempAndHumid.humidity;\n\n}\n\nvoid FlameDetection() {\n\n flameDetection = digitalRead(flameSensorPort);\n\n}\n\nvoid GasDetection() {\n\n gasDetection = analogRead(gasSensorPin);\n\n}\n\nvoid PrintData() {\n\n Serial.print(\"Light: \");\n Serial.print(lightMeasureMap);\n Serial.println(\" %\");\n\n Serial.print(\"Temperature: \");\n Serial.print(temperature_read);\n Serial.println(\" C\");\n\n Serial.print(\"Humidity: \");\n Serial.print(humidity_read);\n Serial.println(\" %\");\n\n Serial.print(\"Gas Levels: \");\n Serial.print(gasDetection);\n Serial.print(\" ppm\");\n\n if (gasDetection > 400) {\n Serial.println(\" (HARMFUL GAS DETECTED!)\");\n }\n else {\n Serial.println(\" (Normal GAS Conditions)\");\n }\n\n Serial.print(\"Fire Detection: \");\n if (flameDetection == LOW) {\n Serial.println(\"FIRE DETECTED!\");\n }\n else {\n Serial.println(\"NO FIRE\");\n }\n}\n" }, { "alpha_fraction": 0.48441559076309204, "alphanum_fraction": 0.48766234517097473, "avg_line_length": 22.0625, "blob_id": "10e7a6239b88d3f0c68a638ee65ffd432bf18032", "content_id": "851a1af7040261ecd1bac10fb8e1ee874db649a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1540, "license_type": "no_license", "max_line_length": 82, "num_lines": 64, "path": "/Web_App/index.php", "repo_name": "rfernandez2311/LightWave", "src_encoding": "UTF-8", "text": "<?php\r\n require_once \"dbconnect.php\";\r\n\r\n if ($_SERVER[\"REQUEST_METHOD\"] == \"POST\"){\r\n\t \r\n $user = $_POST[\"username\"];\r\n $pwd = $_POST[\"password\"];\r\n\r\n $query = \"SELECT * FROM Users WHERE username='$user' AND password='$pwd';\";\r\n\r\n if (!$query) {\r\n\t printf(\"Error: %s\\n\", mysqli_error($connection));\r\n\t exit();\r\n }\r\n\r\n $result 
= mysqli_query($connection,$query);\r\n \r\n while($row = mysqli_fetch_array($result)){\r\n \r\n if ($user == $row['username'] && $pwd == $row['password']){\r\n\t mysqli_close($connection);\r\n\t header(\"Location: dashboard.php\");\r\n\t exit();\r\n }\r\n }\r\n}\r\n?>\r\n\r\n<html>\r\n <head>\r\n <title>Log In</title>\r\n <link rel=\"stylesheet\" type= \"text/css\" href=\"css/Login.css\">\r\n </head>\r\n <body>\r\n\r\n <div class=\"loginbox\">\r\n <img src=\"LW.png\" class=\"avatar\">\r\n\r\n <h1>Login Here</h1>\r\n\r\n\r\n <form method=\"post\">\r\n\r\n <p>Username</p>\r\n <input type=\"text\" name=\"username\" placeholder=\"Enter Username\" />\r\n <p>Password</p>\r\n <input type=\"password\" name=\"password\" placeholder=\"Enter Password\" />\r\n\r\n <button class=\"submit\" name=\"submit\">Login</button>\r\n\r\n \r\n\r\n </form>\r\n\t<script src=\"https://code.jquery.com/jquery-3.2.1.min.js\"></script>\r\n\t<script>\r\n\t$('.message a ').click(fucntion(){\r\n \t $('form').animate({height: \"toggle\", opacity: \"toggle\"}, \"slow\");\r\n\t});\r\n \t</script>\r\n \r\n </div>\r\n </body>\r\n\r\n</html>\r\n" }, { "alpha_fraction": 0.577675461769104, "alphanum_fraction": 0.5872650742530823, "avg_line_length": 27.491804122924805, "blob_id": "86ceb1b8f20f3a0af86459ba5d21009436c7fea5", "content_id": "148f8534c9d1ee506f2bb31c21d3eeac8d14b0fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5218, "license_type": "no_license", "max_line_length": 171, "num_lines": 183, "path": "/I2C_Comms.py", "repo_name": "rfernandez2311/LightWave", "src_encoding": "UTF-8", "text": "import smbus\nimport time\nfrom datetime import datetime\nfrom smbus2 import SMBusWrapper\nimport MySQLdb\n\ninit_message = True\nrun_time = 0\n\n#Initial Setup\n# for RPI version 1, use “bus = smbus.SMBus(0)”\nbus = smbus.SMBus(1)\n\n# This is the address we setup in the Arduino Program\naddress = 0x04\n\nprint(\"Establishing connection \\n\")\ndbName = \"LightWave\"\nusername = \"pi\"\npassword = \"0000\"\nhost = \"localhost\"\ndb = MySQLdb.connect(host,username,password,dbName)\ncur = db.cursor()\n################### Initial Setup ##################################\n\ndef writeNumber(value):\n bus.write_byte_data(address, 0, value)\n return -1\n\ndef readNumber():\n number = bus.read_byte(address)\n return number\n\ndef getDate():\n now = datetime.now()\n year = str(now.year)\n month = str(now.month)\n day = str(now.day)\n hour = str(now.hour)\n minutes = str(now.minute)\n date = [day,\"/\",month,\"/\",year,\"-\",hour,\":\",minutes]\n date = ''.join(date)\n \n return date\n\ndef lightControl(light_state):\n if light_state == True:\n writeNumber(int(1))\n print(\"Sending ON command for the light control to Arduino\") \n else:\n print(\"Sending OFF command for the light control to Arduino\")\n\n############### UPDATE DB ##########################################\ndef updateLiveInfo(light,gas,temp,hum,fire):\n print(\"Updating Live Info Table\")\n \n\n cur.execute(\"\"\"UPDATE Live_Info SET temp = '%s' \"\"\" %str(temp))\n db.commit()\n \n cur.execute(\"\"\"UPDATE Live_Info SET hum = '%s' \"\"\" %str(hum))\n db.commit()\n \n cur.execute(\"\"\"UPDATE Live_Info SET light = '%s' \"\"\" %str(light))\n db.commit()\n \n cur.execute(\"\"\"UPDATE Live_Info SET gas = '%s' \"\"\" %str(gas))\n db.commit()\n \n cur.execute(\"\"\"UPDATE Live_Info SET fire = '%s' \"\"\" %str(fire))\n db.commit()\n \n\ndef updateSensors(temperature,humidity,light,gas,fire):\n print(\"Updating Sensors 
Table\")\n \n cur.execute(\"\"\"INSERT INTO Sensors(temp,hum,light,gas,fire,date) VALUES(%s,%s,%s,%s,%s,%s)\"\"\",(str(temperature),str(humidity),str(light),str(gas),str(fire),getDate()))\n time.sleep(.5)\n db.commit()\n\ndef updateActuators(fire,gas):\n print(\"Updating Actuators Table\")\n \n cur.execute(\"\"\"UPDATE Actuators SET fire_state = '%s' \"\"\" %str(fire))\n db.commit()\n \n if int(gas) > 400:\n cur.execute(\"\"\"UPDATE Actuators SET gas_state = '%s' \"\"\" %str(1))\n db.commit()\n else:\n cur.execute(\"\"\"UPDATE Actuators SET gas_state = '%s' \"\"\" %str(0))\n db.commit()\n \n cur.execute(\"SELECT light,temp FROM Live_Info\")\n actuators = cur.fetchone()\n if int(actuators[0]) < 60:\n cur.execute(\"\"\"UPDATE Actuators SET light_state = '%s' \"\"\" %str(1))\n db.commit()\n else:\n cur.execute(\"\"\"UPDATE Actuators SET light_state = '%s' \"\"\" %str(0))\n db.commit() \n if int(actuators[1]) > 27:\n cur.execute(\"\"\"UPDATE Actuators SET fan_state = '%s' \"\"\" %str(1))\n db.commit()\n else:\n cur.execute(\"\"\"UPDATE Actuators SET fan_state = '%s' \"\"\" %str(0))\n db.commit()\n########### READ ALL VALUES FROM ARDUINO #####################\ndef checkTemperature():\n writeNumber(int(1))\n time.sleep(.5)\n print(\"Temperature Received:\",readNumber())\n return readNumber()\n\ndef checkHumidity():\n writeNumber(int(2))\n time.sleep(.5)\n print(\"Humidity Received:\",readNumber())\n return readNumber()\n \ndef checkLight(): \n writeNumber(int(3))\n time.sleep(.5)\n print(\"Light %: \",readNumber())\n return readNumber()\n\ndef checkFire(): #Sending #4 to Arduino\n writeNumber(int(4))\n time.sleep(.5)\n print(\"Fire: \",readNumber())\n return readNumber()\n\ndef checkGas(): #Sending #5 to Arduino\n writeNumber(int(5))\n time.sleep(.5)\n ppm_shifted_val = bus.read_byte(address)\n time.sleep(.1)\n writeNumber(int(6))\n time.sleep(.5)\n ppm_anded_val = bus.read_byte(address)\n time.sleep(.1)\n ppm_read = (ppm_shifted_val<<8) | ppm_anded_val\n print(\"Gas Level: \",ppm_read)\n return ppm_read\n\ndef checkShutDown(): #Checks if Shutdown was selected\n writeNumber(int(7))\n if readNumber() == int(2):\n return True\n else:\n return False\n######################## Functions ###################################\n\nwhile True:\n\n # System is ON\n if init_message == True:\n print (\"Starting up the LighWave System\")\n print (\"Current time:\",getDate())\n init_message = False\n \n # End if button was pusshed script\n if checkShutDown():\n print(\"LighWave System is Idle\")\n\n else: \n # Get all readings \n temp_measure = checkTemperature()\n humidity_measure = checkHumidity()\n gas_measure = checkGas()\n lightPer_measure = checkLight()\n fire_check = checkFire()\n\n #Update LiveInfo table \n updateLiveInfo(lightPer_measure,gas_measure,temp_measure,humidity_measure,fire_check)\n updateActuators(fire_check,gas_measure)\n \n #Update Sensors table \n if run_time > 30:\n updateSensors(temp_measure,humidity_measure,lightPer_measure,gas_measure,fire_check)\n run_time = 0\n \n run_time = run_time + 1\n" }, { "alpha_fraction": 0.5390269756317139, "alphanum_fraction": 0.5652782917022705, "avg_line_length": 19.702898025512695, "blob_id": "96de20ccd1788bbe923c562184989932b4e8ff16", "content_id": "4fe6148fe94b02532f1ab88a2f4cf964ea54da24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2857, "license_type": "no_license", "max_line_length": 114, "num_lines": 138, "path": "/Actuators_Code/Actuators_Code.ino", "repo_name": 
"rfernandez2311/LightWave", "src_encoding": "UTF-8", "text": "#include <ESP8266HTTPClient.h>\n#include <esp8266wifi.h>\n#include <ESP8266WiFi.h>\n#include <cstring>\n\n#define buzzerPin 16\n#define fanRelayPin 5\n#define lightRelayPin 4\n\nconst char* ssid = \"Familia\";\nconst char* password = \"mistybutter485\";\nString temp, hum, gas, light;\nString fire = \"1\";\nint value = 1;\n\nvoid setup () {\n\n Serial.begin(115200);\n WiFi.begin(ssid, password);\n\n while (WiFi.status() != WL_CONNECTED) {\n\n delay(1000);\n Serial.print(\"Connecting..\");\n\n }\n\n pinMode(buzzerPin, OUTPUT);\n pinMode(fanRelayPin, OUTPUT);\n pinMode(lightRelayPin, OUTPUT);\n\n}\n\nvoid loop() {\n\n if (WiFi.status() == WL_CONNECTED) { //Check WiFi connection status\n\n HTTPClient http; //Declare an object of class HTTPClient\n Serial.print(\"Connected! HTTP Code: \");\n\n http.begin(\"http://192.168.1.218/system_info.txt\"); //Specify request destination\n\n int httpCode = http.GET(); //Send the request\n Serial.println(httpCode);\n if (httpCode > 0) { //Check the returning code\n String payload = http.getString(); //Get the request response payload\n Serial.println(payload); //Print the response payload\n value = 1;\n assignValues(payload);\n }\n\n http.end(); //Close connection\n\n }\n printAll();\n actions();\n resetAll();\n delay(10000); //Send a request every 30 seconds\n\n}\n\nvoid assignValues(String payload) {\n for (int i = 0; i < payload.length(); i++) {\n\n if (payload.charAt(i) == ' ') {\n value++;\n }\n else {\n if (value == 1) {\n hum += payload[i];\n }\n else if (value == 2) {\n temp += payload[i];\n }\n else if (value == 3) {\n light += payload[i];\n }\n else if (value == 4) {\n fire = payload[i];\n }\n else if (value == 5) {\n gas += payload[i];\n }\n }\n }\n}\n\nvoid actions() {\n //Set alarm to on if fire is detected\n if (fire == \"0\") {\n tone(buzzerPin, 261);\n }\n else {\n noTone(buzzerPin);\n }\n //Set alarm to on if gas > 400\n if (atoi(gas.c_str()) < 400) { //cambie\n tone(buzzerPin, 261);\n }\n else {\n noTone(buzzerPin);\n } \n //Trun on Light if % is below 50%\n if (atoi(light.c_str()) < 50) {\n digitalWrite(lightRelayPin, HIGH);\n }\n else {\n digitalWrite(lightRelayPin, LOW);\n }\n //Turn on Fan if temp > 26\n if (atoi(temp.c_str()) > 26) {\n digitalWrite(fanRelayPin, HIGH);\n }\n else {\n digitalWrite(fanRelayPin, LOW);\n }\n}\n\nvoid resetAll() {\n value = 0;\n temp = \"\";\n hum = \"\";\n gas = \"\";\n light = \"\";\n}\n\nvoid printAll() {\n Serial.print(\"Hum = \");\n Serial.println(hum);\n Serial.print(\"Temp = \");\n Serial.println(temp);\n Serial.print(\"Light % = \");\n Serial.println(light);\n Serial.print(\"Fire = \");\n Serial.println(fire);\n Serial.print(\"Gas = \");\n Serial.println(gas);\n}\n" }, { "alpha_fraction": 0.6048780679702759, "alphanum_fraction": 0.6073170900344849, "avg_line_length": 20.77777862548828, "blob_id": "6d3c815f7b8e699bed98ed53a732f4902badeeda", "content_id": "8ebbb4608c030d693871801dfc47cb100680bb2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 410, "license_type": "no_license", "max_line_length": 80, "num_lines": 18, "path": "/Web_App/log-in.php", "repo_name": "rfernandez2311/LightWave", "src_encoding": "UTF-8", "text": "<?php\r\n\r\ninclude \"dbconnect.php\";\r\n\r\n$usuario =$_POST[\"usuario\"];\r\n$userpassword = $_POST[\"userpassword\"];\r\n$mysql_qry = 'select * from Users where username = \"'.$usuario.'\"\r\nand password = \"'.$userpassword.'\";';\r\n$result = 
mysqli_query($connection,$mysql_qry) or die ('error: '.mysql_error());\r\n\r\nif(mysqli_num_rows($result)==1){\r\n echo \"success\";\r\n}\r\nelse{\r\n echo \"log-in not success\";\r\n}\r\n\r\n?>\r\n" }, { "alpha_fraction": 0.5056322813034058, "alphanum_fraction": 0.5138081312179565, "avg_line_length": 24.840375900268555, "blob_id": "06c7510825e5c50678f55438c57c01598092af68", "content_id": "5b86deb5c28082e4baa6f80411393978b03d44b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 5505, "license_type": "no_license", "max_line_length": 155, "num_lines": 213, "path": "/Web_App/liveinfo.php", "repo_name": "rfernandez2311/LightWave", "src_encoding": "UTF-8", "text": "<?php\n require_once \"dbconnect.php\";\n?>\n\n<!DOCTYPE html> \n<html>\n<head>\n <meta charset=\"utf-8\" />\n <meta http-equiv=\"refresh\" content=\"15\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n <title>Live Mesures</title>\n \n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n\n <link rel=\"stylesheet\" href=\"live.scss\" >\n <link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css\">\n <script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js\"></script>\n <script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js\"></script>\n\n <link href=\"https://stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css\" rel=\"stylesheet\" integrity=\"sha384-wvfXpqpZZVQGK6TAh5PVlGOfQNHSoD2xbE+QkPxCAFlNEevoEH3Sl0sibVcOQVnN\" crossorigin=\"anonymous\">\n <link href=\"https://fonts.googleapis.com/icon?family=Material+Icons\" rel=\"stylesheet\">\n\n <script src=\"main.js\"></script>\n <script async defer src= \"https://buttons.github.io/buttons.js\"></script>\n </head>\n <body>\n\n \n \n <div id=\"sideMenu\"> \n \n \n <a href=\"dashboard.php\" ><img src=\"lightwave.png\" alt=\"Lightwave Logo\"></a>\n\n <nav>\n\n <a href=\"dashboard.php\" class=\"active\"> <i class=\"fa fa-home\" aria-hidden=\"true\"></i> Home</a>\n <a href=\"about.php\" > <i class=\"fa fa-user\" aria-hidden=\"true\"></i>About Us</a>\n <a href=\"liveinfo.php\"><i class=\"fa fa-thermometer-half\" aria-hidden=\"true\"></i>Live Mesures</a>\n <a href=\"index.php\" class=\"btn\"><span class=\"glyphicon glyphicon-log-out\"></span> Log out </a>\n </nav>\n </div>\n\n <!------------------- header ---------------------->\n <header>\n\n <div class=\"search-area\">\n <i class=\"fa fa-search\" aria-hidden=\"true\"></i>\n <input type=\"text\" name=\"\" value=\"\">\n </div>\n <div class=\"user-area\">\n\n \n </div>\n </header>\n\n\n\n <!------------------- dashboard cards ---------------------->\n <div class=\"flex-container\">\n \n <div class=\"box1\">\n <p class=\"title\">TEMPERATURE</p>\n <img src=\"temp.png\"></img>\n <?php\n \n\n $query = \"SELECT temp FROM Live_Info;\";\n \n $result = mysqli_query($connection,$query);\n \n while ($row = mysqli_fetch_array($result)){\n echo \"<p>\".$row['temp'].\"°C\".\"</p>\"; \n }\n ?> \n </div>\n\n\n <div class=\"box2\">\n <p class=\"title2\">HUMIDITY</p>\n <img src=\"drop.png\"></img> \n <?php\n $query = \"SELECT hum FROM Live_Info;\";\n \n $result = mysqli_query($connection,$query);\n \n while ($row = mysqli_fetch_array($result)){\n echo \"<p>\".$row['hum'].\"%\".\"</p>\"; \n }\n ?> \n </div>\n\n <div class=\"box3\">\n <p class=\"title3\">GAS</p>\n <img src=\"gas.png\"></img> \n <?php\n \n $query = \"SELECT gas FROM Live_Info;\";\n \n $result = 
mysqli_query($connection,$query);\n \n while ($row = mysqli_fetch_array($result)){\n echo \"<p>\".$row['gas'].\"PPM\".\"</p>\"; \n }\n \n ?> \n</div>\n\n<div class=\"box4\">\n <p class=\"title4\">FAN</p>\n <img src=\"fan2.png\"></img> \n <?php\n $query = \"SELECT fan_state FROM Actuators;\";\n \n $result = mysqli_query($connection,$query);\n \n while ($row = mysqli_fetch_array($result)){\n if($row['fan_state'] == \"1\"){\n echo \"<p class=\\\"action2\\\">ON</p>\";\n }\n else{\n echo \"<p class=\\\"action1\\\">OFF</p>\";\n }\n }\n ?> \n \n </div>\n\n <div class=\"box5\">\n <p class=\"title5\">FIRE</p>\n <img src=\"fire.png\"></img> \n <?php\n $query = \"SELECT fire_state FROM Actuators;\";\n \n $result = mysqli_query($connection,$query);\n \n while ($row = mysqli_fetch_array($result)){\n if($row['fire_state'] == \"0\"){\n echo \"<p class=\\\"action4\\\">ON</p>\";\n }\n else{\n echo \"<p class=\\\"action3\\\">OFF</p>\";\n }\n }\n ?> \n \n </div>\n\n <div class=\"box6\">\n <p class=\"title6\">LIGHT</p>\n <img src=\"bulb.png\"></img>\n <?php\n $query = \"SELECT light_state FROM Actuators;\";\n \n $result = mysqli_query($connection,$query);\n \n while ($row = mysqli_fetch_array($result)){\n if($row['light_state'] == \"1\"){\n echo \"<p class=\\\"action6\\\">ON</p>\";\n }\n else{\n echo \"<p class=\\\"action5\\\">OFF</p>\";\n }\n }\n \n ?> \n </div>\n\n <div class=\"box7\">\n <p class=\"title7\">ALARM</p>\n <img src=\"alarm.png\"></img> \n <?php\n $query = \"SELECT fire_state, gas_state FROM Actuators;\";\n \n $result = mysqli_query($connection,$query);\n \n while ($row = mysqli_fetch_array($result)){\n if($row['fire_state'] == \"0\" || $row['gas_state'] == \"1\"){\n echo \"<p class=\\\"action8\\\">ON</p>\";\n }\n else{\n echo \"<p class=\\\"action7\\\">OFF</p>\";\n }\n }\n ?>\n </div>\n \n <?php\n $myfile = fopen(\"/var/www/LightWave/system_info.txt\",\"w\") or die(\"Unable to open file!\");\n\n $query = \"SELECT * FROM Live_Info;\";\n \n $result = mysqli_query($connection,$query);\n \n while ($row = mysqli_fetch_array($result)){\n echo \"<tr><td>\".$row['hum'].\"</td><td>\".$row['temp'].\"</td><td>\".$row['light'].\"</td><td>\".$row['fire'].\"</td><td>\".$row['gas'].\"</td></tr>\";\n $table = $row['hum'].\" \".$row['temp'].\" \".$row['light'].\" \".$row['fire'].\" \".$row['gas'];\n fwrite($myfile,(string)$table);\n fwrite($myfile,\"\\n\"); \n }\n fclose($myfile);\n mysqli_close($connection);\n ?> \n \n\n <!------------------- dashboard cards#2 ---------------------->\n\n \n\n\n\n </body>\n</html>\n" }, { "alpha_fraction": 0.5967448353767395, "alphanum_fraction": 0.6071313619613647, "avg_line_length": 24.656593322753906, "blob_id": "db0682ba7f5aa37a6c8beadaa5d91344ba0b238b", "content_id": "35bb029993d5bd76e73cb6ae4097a47508616501", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9339, "license_type": "no_license", "max_line_length": 118, "num_lines": 364, "path": "/Project_Code/Project_Code.ino", "repo_name": "rfernandez2311/LightWave", "src_encoding": "UTF-8", "text": "#include <dht.h>\n#include <Arduino_FreeRTOS.h>\n#include <semphr.h> // add the FreeRTOS functions for Semaphores (or Flags).\n#include <Wire.h>\n\n/* Varibale and Port Declaration*/\n#define lightSensorPort A0\n#define dhtSensorPort A1\n#define flameSensorPort 3\n#define gasSensorPin A3\n#define SLAVE_ADDRESS 0x04\n\nint temperature_read, humidity_read;\nint lightSensorValue, lightMeasureMap;\nint flameDetection;\nint gasDetection;\nint wireReadValue;\n\n// Declare a mutex 
Semaphore Handle which we will use to manage the Serial Port.\n// It will be used to ensure only only one Task is accessing this resource at any time.\nSemaphoreHandle_t xSerialSemaphore;\n\n// define two Tasks for DigitalRead & AnalogRead\nvoid TaskDigital( void *pvParameters );\nvoid TaskLightMeasure( void *pvParameters );\nvoid TaskTempAndHumid( void *pvParameters );\nvoid TaskFlameDetection( void *pvParameters );\n//void TaskGasDetection( void *pvParameters );\n//void TaskPrint( void *pvParameters );\n//void TaskSendData( void *pvParameters );\n\n// the setup function runs once when you press reset or power the board\nvoid setup() {\n\n // initialize serial communication at 9600 bits per second:\n Serial.begin(9600);\n\n // initialize i2c as slave\n //Wire.begin(SLAVE_ADDRESS);\n\n // define callbacks for i2c communication\n //Wire.onReceive(receiveData);\n //Wire.onRequest(sendData);\n\n while (!Serial) {\n ; // wait for serial port to connect. Needed for native USB, on LEONARDO, MICRO, YUN, and other 32u4 based boards.\n }\n\n // Semaphores are useful to stop a Task proceeding, where it should be paused to wait,\n // because it is sharing a resource, such as the Serial port.\n // Semaphores should only be used whilst the scheduler is running, but we can set it up here.\n if ( xSerialSemaphore == NULL ) // Check to confirm that the Serial Semaphore has not already been created.\n {\n xSerialSemaphore = xSemaphoreCreateMutex(); // Create a mutex semaphore we will use to manage the Serial Port\n if ( ( xSerialSemaphore ) != NULL )\n xSemaphoreGive( ( xSerialSemaphore ) ); // Make the Serial Port available for use, by \"Giving\" the Semaphore.\n }\n\n // Now set up two Tasks to run independently.\n xTaskCreate(\n TaskDigital\n , (const portCHAR *)\"Digital\" // A name just for humans\n , 128 // This stack size can be checked & adjusted by reading the Stack Highwater\n , NULL\n , 0 // Priority, with 3 (configMAX_PRIORITIES - 1) being the highest, and 0 being the lowest.\n , NULL );\n\n xTaskCreate(\n TaskLightMeasure\n , (const portCHAR *) \"LightMeasure\"\n , 128 // Stack size\n , NULL\n , 1 // Priority\n , NULL );\n\n xTaskCreate(\n TaskTempAndHumid\n , (const portCHAR *) \"TempAndHumid\"\n , 128 // Stack size\n , NULL\n , 1 // Priority\n , NULL );\n// xTaskCreate(\n// TaskPrint\n// , (const portCHAR *) \"Print\"\n// , 256 // Stack size\n// , NULL\n// , 2 // Priority\n// , NULL );\n xTaskCreate(\n TaskFlameDetection\n , (const portCHAR *) \"FlameDetection\"\n , 128 // Stack size\n , NULL\n , 3 // Priority\n , NULL );\n\n // xTaskCreate(\n // TaskGasDetection\n // , (const portCHAR *) \"GasDetection\"\n // , 128 // Stack size\n // , NULL\n // , 1 // Priority\n // , NULL );\n\n\n // xTaskCreate(\n // TaskSendData\n // , (const portCHAR *) \"SendData\"\n // , 128 // Stack size\n // , NULL\n // , 3 // Priority\n // , NULL );\n\n // Now the Task scheduler, which takes over control of scheduling individual Tasks, is automatically started.\n}\n\nvoid loop()\n{\n // Empty. Things are done in Tasks.\n}\n\n/*--------------------------------------------------*/\n/*---------------------- Tasks ---------------------*/\n/*--------------------------------------------------*/\n\nvoid TaskDigital( void *pvParameters __attribute__((unused)) ) // This is a Task.\n{\n /*\n DigitalReadSerial\n Reads a digital input on pin 2, prints the result to the serial monitor\n\n This example code is in the public doma#include <Wire.h>\n */\n\n // digital pin 2 has a pushbutton attached to it. 
Give it a name:\n uint8_t pushButton = 2;\n\n // make the pushbutton's pin an input:\n pinMode(pushButton, INPUT);\n\n for (;;) // A Task shall never return or exit.\n {\n // read the input pin:\n int buttonState = digitalRead(pushButton);\n\n // See if we can obtain or \"Take\" the Serial Semaphore.\n // If the semaphore is not available, wait 5 ticks of the Scheduler to see if it becomes free.\n if ( xSemaphoreTake( xSerialSemaphore, ( TickType_t ) 5 ) == pdTRUE )\n {\n // We were able to obtain or \"Take\" the semaphore and can now access the shared resource.\n // We want to have the Serial Port for us alone, as it takes some time to print,\n // so we don't want it getting stolen during the middle of a conversion.\n // print out the state of the button:\n Serial.println(buttonState);\n\n xSemaphoreGive( xSerialSemaphore ); // Now free or \"Give\" the Serial Port for others.\n }\n\n vTaskDelay(1); // one tick delay (15ms) in between reads for stability\n }\n}\n\nvoid TaskLightMeasure( void *pvParameters __attribute__((unused)) )\n{\n for (;;)\n {\n lightSensorValue = analogRead(lightSensorPort);\n\n if ( xSemaphoreTake( xSerialSemaphore, ( TickType_t ) 5 ) == pdTRUE )\n {\n lightMeasureMap = map(lightSensorValue, 0, 690, 0, 100);\n if (lightMeasureMap > 100) {\n lightMeasureMap = 100;\n }\n xSemaphoreGive( xSerialSemaphore );\n }\n\n vTaskDelay(1);\n }\n}\n\nvoid TaskTempAndHumid( void *pvParameters __attribute__((unused)) )\n{\n dht tempAndHumid;\n\n for (;;)\n {\n /* save temperature and humidity reading to variables */\n tempAndHumid.read11(dhtSensorPort);\n\n\n if ( xSemaphoreTake( xSerialSemaphore, ( TickType_t ) 5 ) == pdTRUE )\n {\n temperature_read = tempAndHumid.temperature;\n humidity_read = tempAndHumid.humidity;\n\n xSemaphoreGive( xSerialSemaphore );\n }\n\n vTaskDelay(1);\n }\n}\n\nvoid TaskFlameDetection( void *pvParameters __attribute__((unused)) )\n{\n pinMode(flameSensorPort, INPUT);\n for (;;)\n {\n\n flameDetection = digitalRead(flameSensorPort);\n\n if ( xSemaphoreTake( xSerialSemaphore, ( TickType_t ) 5 ) == pdTRUE )\n {\n\n if (flameDetection == LOW) {\n Serial.println(\"FIRE DETECTED!\");\n }\n\n else {\n Serial.println(\"NO FIRE\");\n }\n\n xSemaphoreGive( xSerialSemaphore );\n }\n\n vTaskDelay(1);\n }\n}\n\n//void TaskGasDetection( void *pvParameters __attribute__((unused)) )\n//{\n//\n// for (;;)\n// {\n// gasDetection = analogRead(gasSensorPin);\n//\n// if ( xSemaphoreTake( xSerialSemaphore, ( TickType_t ) 5 ) == pdTRUE )\n// {\n//\n// if (gasDetection > 400) {\n// Serial.println(\"HARMFUL GAS DETECTED!\");\n// }\n//\n// else {\n// Serial.println(\"NORMAL GAS LEVELS\");\n// }\n//\n// xSemaphoreGive( xSerialSemaphore );\n// }\n//\n// vTaskDelay(1);\n// }\n//}\n//void TaskPrint( void *pvParameters __attribute__((unused)) )\n//{\n//\n// for (;;)\n// {\n//\n// if ( xSemaphoreTake( xSerialSemaphore, ( TickType_t ) 5 ) == pdTRUE )\n// {\n// Serial.print(\"Light: \");\n// Serial.print(lightMeasureMap);\n// Serial.println(\" %\");\n//\n// Serial.print(\"Temperature: \");\n// Serial.print(temperature_read);\n// Serial.println(\" C\");\n//\n// Serial.print(\"Humidity: \");\n// Serial.print(humidity_read);\n// Serial.println(\" %\");\n//\n// Serial.print(\"Gas Levels: \");\n// Serial.println(gasDetection);\n// Serial.println(\" ppm\");\n//\n// Serial.print(\"Fire Detection: \");\n// if (flameDetection == LOW) {\n// Serial.println(\"FIRE DETECTED!\");\n// }\n// else {\n// Serial.println(\"NO FIRE\");\n// }\n//\n//\n// xSemaphoreGive( xSerialSemaphore );\n// }\n//\n// 
vTaskDelay(1);\n// }\n//}\n//void TaskSendData( void *pvParameters __attribute__((unused)) )\n//{\n//\n// for (;;)\n// {\n// /* save temperature and humidity reading to variables */\n//\n// if ( xSemaphoreTake( xSerialSemaphore, ( TickType_t ) 5 ) == pdTRUE )\n// {\n//\n// PrintData();\n// xSemaphoreGive( xSerialSemaphore );\n// }\n//\n// vTaskDelay(20);\n// }\n//}\n\n//void receiveData(int byteCount) {\n//\n// while (Wire.available()) {\n// wireReadValue = Wire.read();\n// }\n//}\n//\n//void sendData() {\n//\n// switch (wireReadValue) {\n// case 1:\n// Wire.write((byte)temperature_read);\n// break;\n// case 2:\n// Wire.write((byte)humidity_read);\n// break;\n// case 3:\n// Wire.write((byte)lightMeasureMap);\n// break;\n// case 4:\n// Wire.write((byte)flameDetection);\n// break;\n// case 5:\n// Wire.write((byte)gasDetection);\n// break;\n// }\n//}\n//\n//void printData() {\n// Serial.print(\"Light: \");\n// Serial.print(lightMeasureMap);\n// Serial.println(\" %\");\n//\n// Serial.print(\"Temperature: \");\n// Serial.print(temperature_read);\n// Serial.println(\" C\");\n//\n// Serial.print(\"Humidity: \");\n// Serial.print(humidity_read);\n// Serial.println(\" %\");\n//\n// Serial.print(\"Gas Levels: \");\n// Serial.println(gasDetection);\n// Serial.println(\" ppm\");\n//\n// Serial.print(\"Fire Detection: \");\n// if (flameDetection == LOW) {\n// Serial.println(\"FIRE DETECTED!\");\n// }\n// else {\n// Serial.println(\"NO FIRE\");\n// }\n//}\n" }, { "alpha_fraction": 0.79666668176651, "alphanum_fraction": 0.79666668176651, "avg_line_length": 59, "blob_id": "c7c7ed6a706eab83ee6580c996a4da33270f9417", "content_id": "b9acf60dad4eeac0ea107bf854e7fb307f143754", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 300, "license_type": "no_license", "max_line_length": 191, "num_lines": 5, "path": "/README.md", "repo_name": "rfernandez2311/LightWave", "src_encoding": "UTF-8", "text": "# LightWave\nMicro-Controller's Project to control a set of sensors and display info at webpage dashboard.\n\n\nTo mount this project youll need an arduino to run the code for the actuators, a raspberry pi for the database and to acomodate the php files and css all in the same folder to run everything.\n" }, { "alpha_fraction": 0.5589353442192078, "alphanum_fraction": 0.572877049446106, "avg_line_length": 27.690908432006836, "blob_id": "1f34ed189622ba2b56589d3395ab51c9e096a1af", "content_id": "898ac03d286358604dad3b93249442696cb718ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1578, "license_type": "no_license", "max_line_length": 146, "num_lines": 55, "path": "/Web_App/table.php", "repo_name": "rfernandez2311/LightWave", "src_encoding": "UTF-8", "text": "<?php\n require_once \"dbconnect.php\";\n?>\n<html>\n<head>\n <title>Table with Database</title>\n <meta http-equiv=\"refresh\" content=\"15\">\n</head>\n<body>\n<table ID=\"myTable\" BORDER=\"5\" WIDTH=\"50%\" CELLPADDING=\"4\" CELLSPACING=\"3\">\n <thead> \n <tr>\n <th COLSPAN=\"5\"><BR><h3>DB TABLE TEST</h3>\n </th>\n </tr>\n <tr>\n <th>MEASURED HUMIDITY</th>\n <th>MEASURED TEMPERATURE</th>\n <th>MEASURED LIGHT</th>\n <th>MEASURED FIRE</th>\n <th>MEASURED GAS</th>\n </tr>\n </thead>\n <tbody>\n <?php\n $myfile = fopen(\"/var/www/html/system_info.txt\",\"w\") or die(\"Unable to open file!\");\n\n $query = \"SELECT * FROM Live_Info;\";\n \n $result = mysqli_query($connection,$query);\n \n while ($row = mysqli_fetch_array($result)){\n echo 
\"<tr><td>\".$row['hum'].\"</td><td>\".$row['temp'].\"</td><td>\".$row['light'].\"</td><td>\".$row['fire'].\"</td><td>\".$row['gas'].\"</td></tr>\";\n $table = $row['hum'].\" \".$row['temp'].\" \".$row['light'].\" \".$row['fire'].\" \".$row['gas'];\n fwrite($myfile,(string)$table);\n fwrite($myfile,\"\\n\"); \n }\n fclose($myfile);\n mysqli_close($connection);\n?>\n</tbody>\n</table>\n\n<script type=\"text/javascript\" src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js\"></script>\n\n<script src=\"https://cdn.jsdelivr.net/npm/[email protected]/lib/jquery.tabletojson.min.js\" integrity=\"sha256-AqDz23QC5g2yyhRaZcEGhMMZwQnp8fC6sCZpf+e7pnw=\" crossorigin=\"anonymous\"></script>\n\n<!--<script type=\"text/javascript\">\n var auto_refresh = setInterval(function()\n {\n \t$('#myTable').load('table.php');\n }, 10000); \n</script>-->\n</body>\n</html>\n" }, { "alpha_fraction": 0.5887850522994995, "alphanum_fraction": 0.5919002890586853, "avg_line_length": 14.894737243652344, "blob_id": "fe274738d89aa6be75d2f9d53bbdc1db76ea52e5", "content_id": "c2e9d432152a25cf5a18e251bb18652e78245f5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 321, "license_type": "no_license", "max_line_length": 49, "num_lines": 19, "path": "/Web_App/readInfo.php", "repo_name": "rfernandez2311/LightWave", "src_encoding": "UTF-8", "text": "<?php\r\n\r\ninclude \"dbconnect.php\";\r\n\r\n$consulta = \"select * from Live_Info\";\r\n\r\n$result = $connection->query($consulta);\r\n\r\n$usuarios = array();\r\n\r\nwhile($fila = $result->fetch_array()){\r\n\t$usuarios[] = $fila;\r\n\t//$usuarios[] = array_map('utf8_encode', $fila);\r\n}\r\n\r\necho json_encode($usuarios);\r\n\r\n$result->close();\r\n?>\r\n" } ]
13
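The LightWave row above moves a 10-bit gas reading (0–1023) from the Arduino slave to the Raspberry Pi one byte at a time: `sendData()` cases 5 and 6 emit the high and low bytes, and `checkGas()` in I2C_Comms.py reassembles them with `(ppm_shifted_val<<8) | ppm_anded_val`. Note that as written the Arduino side computes `((byte)gasDetection >> 8)`, which truncates the value to 8 bits before shifting, so the transmitted high byte is always zero. The sketch below is a minimal, bus-free illustration of the split/reassemble arithmetic as presumably intended; `split_word` and `join_bytes` are illustrative names of my own, not identifiers from the repo.

```python
# Minimal sketch of the 16-bit split/reassemble pattern used between the
# LightWave Arduino sketch and I2C_Comms.py. The SMBus transfer itself is
# stubbed out: each read_byte() call carries only one byte, so a 10-bit
# ADC value has to cross the bus as two reads. split_word/join_bytes are
# illustrative names, not identifiers from the repo.

def split_word(value):
    """Split a 16-bit value into (high, low) bytes, shifting BEFORE
    truncating -- the order the Arduino cases 5 and 6 appear to intend."""
    return (value >> 8) & 0xFF, value & 0xFF

def join_bytes(high, low):
    """Reassemble on the receiving side, as checkGas() does."""
    return (high << 8) | low

if __name__ == "__main__":
    for reading in (0, 399, 400, 1023):  # 400 is the repo's gas alarm threshold
        hi, lo = split_word(reading)
        assert join_bytes(hi, lo) == reading
        print(f"{reading:4d} -> high=0x{hi:02X} low=0x{lo:02X}")
```

Round-tripping every value in the ADC range confirms the arithmetic; on real hardware the two reads should also happen back-to-back, since a reading that changes between the high-byte and low-byte requests would reassemble incorrectly.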
shiftyhead/GB_Algoritm
https://github.com/shiftyhead/GB_Algoritm
7d39d7c65993d8522424f7637e7bc72229260f56
54c214899126a8428bd4cc3fb67902a2b79a75be
1b7c4df9cc548d61016b5a0ea735a216a63f5930
refs/heads/master
2020-04-02T23:30:31.090907
2018-11-21T13:22:27
2018-11-21T13:22:27
154,870,083
0
0
null
2018-10-26T17:26:56
2018-11-21T06:06:13
2018-11-21T12:02:24
Python
[ { "alpha_fraction": 0.6765498518943787, "alphanum_fraction": 0.7035040259361267, "avg_line_length": 23.733333587646484, "blob_id": "84aefca34cb72864c00da17e4146e046aeb18491", "content_id": "8f24c420cddede758be5eb55156825425997ad6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 82, "num_lines": 15, "path": "/task2_3.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "\"\"\"\n3. Сформировать из введенного числа обратное по порядку входящих в него цифр\nи вывести на экран. Например, если введено число 3486, то надо вывести число 6843.\n\"\"\"\n\nuser_input = input(\"Введите целое число: \")\n\nlength = len(user_input)\n\nreverse = []\n\nfor i in range(length):\n reverse.append(user_input[length - i - 1])\n\nprint('обратное число: ' + ''.join(reverse))\n" }, { "alpha_fraction": 0.6017130613327026, "alphanum_fraction": 0.6295503377914429, "avg_line_length": 22.350000381469727, "blob_id": "84c13c0f41b4f3f6662fbf0c268010e545b23b3a", "content_id": "46bed1641879a8188a93014329955645e7ba6ccc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 613, "license_type": "no_license", "max_line_length": 78, "num_lines": 20, "path": "/task2_5.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "\"\"\"\n5. Вывести на экран коды и символы таблицы ASCII,\nначиная с символа под номером 32 и заканчивая 127-м включительно.\nВывод выполнить в табличной форме: по десять пар «код-символ» в каждой строке.\n\"\"\"\n\nBEGIN = 32\nEND = 127\nROW_LENGTH = 10\n\nrows = (END - BEGIN) / ROW_LENGTH\n\nfor i in range(round(rows)):\n res = {}\n for j in range(BEGIN, BEGIN + ROW_LENGTH):\n if j > END:\n break\n res[j] = chr(j)\n print(res)\n BEGIN += ROW_LENGTH\n" }, { "alpha_fraction": 0.7156756520271301, "alphanum_fraction": 0.7340540289878845, "avg_line_length": 33.25925827026367, "blob_id": "3fbd3bc341642f0e3a03e8c73659b0d725263ffd", "content_id": "7a627af9b11763d963a5ad7d7bee2cc8fd4b2334", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1351, "license_type": "no_license", "max_line_length": 91, "num_lines": 27, "path": "/task1_4.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "# 4. Написать программу, которая генерирует в указанных пользователем границах:\n# случайное целое число;\n# случайное вещественное число;\n# случайный символ. 
Для каждого из трех случаев пользователь задает свои границы диапазона.\n# Например, если надо получить случайный символ от 'a' до 'f', то вводятся эти символы.\n# Программа должна вывести на экран любой символ алфавита от 'a' до 'f' включительно.\n\nimport random\nimport string\n\ni1 = int(input('вывести целое число с:\\n'))\ni2 = int(input('по\\n'))\n\nprint('случайное целое: ', random.randint(i1, i2))\n\nf1 = float(input('вывести вещественное лисло с:\\n'))\nf2 = float(input('по\\n'))\n\nprint('случайное вещественное: ', random.uniform(f1, f2))\n\ns1 = input('вывести символ с:\\n').lower()\ns2 = input('по\\n').lower()\nsi1 = string.ascii_lowercase.find(s1)\nsi2 = string.ascii_lowercase.find(s2)\nsr = string.ascii_lowercase[random.randint(si1, si2)]\n\nprint('случайный символ: ', sr)\n" }, { "alpha_fraction": 0.64226895570755, "alphanum_fraction": 0.6495882868766785, "avg_line_length": 29.36111068725586, "blob_id": "c46a76812db90b0be1babea4ce4fc58770182c52", "content_id": "44d3c3640cdf90f344f639233e92c51b1a5be10b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1530, "license_type": "no_license", "max_line_length": 92, "num_lines": 36, "path": "/task5_1.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "\"\"\"\n1. Пользователь вводит данные о количестве предприятий, их наименования\nи прибыль за 4 квартала (т.е. 4 отдельных числа) для каждого предприятия..\nПрограмма должна определить среднюю прибыль (за год для всех предприятий)\nи вывести наименования предприятий, чья прибыль выше среднего\nи отдельно вывести наименования предприятий, чья прибыль ниже среднего.\n\"\"\"\n\nfrom collections import Counter\n\nnum_ = int(input('Сколько предприятий? '))\nitems_ = {}\nsum_all = Counter()\n\nfor i in range(num_):\n title = input(f'Предприятие {i + 1}. Название: ')\n sum_ = 0\n for j in range(1, 5):\n sum_ += int(input(f'Прибыль за квартал {j}: '))\n avg_ = sum_ / 4\n\n items_[title] = Counter(\n {\n 'sum_': sum_,\n 'avg_': avg_\n }\n )\n sum_all += items_[title] # притянуто за уши конечно, но больше ничего не пришло в голову\n\navg_all = sum_all['sum_'] / num_\nprint(f'Средняя прибыль за год: {avg_all}')\n\nhigh = {j for j in items_ if items_[j]['sum_'] > avg_all}\nlow = set(items_.keys()) - set(high)\nprint(f'Выше среднего: {high}')\nprint(f'Ниже среднего: {low}')\n" }, { "alpha_fraction": 0.5683296918869019, "alphanum_fraction": 0.585683286190033, "avg_line_length": 18.20833396911621, "blob_id": "ff59370511b5bba06fe8d4fc4c1e2ef41c81bee3", "content_id": "1ae0013c38c3311d3911469356b816d9e6a325ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 541, "license_type": "no_license", "max_line_length": 67, "num_lines": 24, "path": "/task3_4.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "\"\"\"\n4. 
Определить, какое число в массиве встречается чаще всего.\n\"\"\"\n\nimport random\n\nSIZE = 10\nsource = [random.randint(0, SIZE) for _ in range(0, SIZE)]\n\ncount_dict = {}\nfor val in source:\n count = 0\n for i in source:\n if i == val:\n count += 1\n count_dict[count] = val\n\nmax_ = 0\nfor j in count_dict:\n if j > max_:\n max_ = j\n\nprint(f'в массиве: {source}')\nprint(f'самое частое число: {count_dict[max_]} (повторов: {max_})')\n" }, { "alpha_fraction": 0.5290322303771973, "alphanum_fraction": 0.5903225541114807, "avg_line_length": 22.846153259277344, "blob_id": "3fa8c990302ae63ce5e63215be349b926a0d067d", "content_id": "705389926940d77bdc82ca7f8a25b7c3b321c2ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 421, "license_type": "no_license", "max_line_length": 60, "num_lines": 13, "path": "/task3_1.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "\"\"\"\n1. В диапазоне натуральных чисел от 2 до 99 определить,\nсколько из них кратны любому из чисел в диапазоне от 2 до 9.\n\"\"\"\n\nprint('В диапазоне 2-99')\n\nfor i in range(2, 10):\n count = 0\n for j in range(2, 100):\n if j % i == 0:\n count += 1\n print(f'числу {i} кратно {count} чисел,')\n" }, { "alpha_fraction": 0.6603773832321167, "alphanum_fraction": 0.6832883954048157, "avg_line_length": 27.538461685180664, "blob_id": "b2fa1bb31a2340a75837547577c2655469d37a78", "content_id": "2299265a5978a751eb404cb9351b5e514f01de69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1058, "license_type": "no_license", "max_line_length": 62, "num_lines": 26, "path": "/task2_6.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "\"\"\"\n6. В программе генерируется случайное целое число от 0 до 100.\nПользователь должен его отгадать не более чем за 10 попыток.\nПосле каждой неудачной попытки должно сообщаться,\nбольше или меньше загаданного введенное пользователем число.\nЕсли за 10 попыток число не отгадано, то вывести его.\n\"\"\"\n\nimport random\n\nsecret = random.randint(0, 100)\nsteps = 10\n# print(secret)\nfor i in range(1, steps + 1):\n answer = int(input(f'попытка {i}: Угадайте число: '))\n\n if answer == secret:\n print(f'Правильно! Ответ: {secret}')\n break\n elif secret < answer:\n print(f'Неверно. Ответ меньше, чем {answer}')\n else:\n print(f'Неверно. Ответ больше, чем {answer}')\n\nif i >= steps:\n print(f'Верный ответ: {secret}')\n" }, { "alpha_fraction": 0.6138613820075989, "alphanum_fraction": 0.669966995716095, "avg_line_length": 20.64285659790039, "blob_id": "22806fce872bc3afe03fc5b0ef0dc58cbe2431a6", "content_id": "ea4d39b43256feafc40b1bbf2ef468e765a5badf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 435, "license_type": "no_license", "max_line_length": 85, "num_lines": 14, "path": "/task1_1.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "# 1. 
Найти сумму и произведение цифр трехзначного числа, которое вводит пользователь.\n\nin3 = input('Введите трехзначное целое число:\\n')\n\ni1 = int(in3[0])\ni2 = int(in3[1])\ni3 = int(in3[2])\n\nsum = i1 + i2 + i3\n\nmul = i1 * i2 * i3\n\nprint('сумма цифр числа: ', sum)\nprint('произведение цифр числа: ', mul)\n" }, { "alpha_fraction": 0.5742856860160828, "alphanum_fraction": 0.6200000047683716, "avg_line_length": 22.33333396911621, "blob_id": "ab4267c213c002eba88eb99dd44712710f72d709", "content_id": "fcafe1bbee784afee1fae082a19d4cf64cadceb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 466, "license_type": "no_license", "max_line_length": 72, "num_lines": 15, "path": "/task2_4.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "\"\"\"\n4. Найти сумму n элементов следующего ряда чисел: 1 -0.5 0.25 -0.125 ...\nКоличество элементов (n) вводится с клавиатуры.\n\"\"\"\n\nlength = int(input(\"Введите целое число n: \"))\n\nrow = [1,]\n_sum = row[0]\n\nfor i in range(1, length):\n row.append(row[i - 1] / -2)\n _sum = _sum + row[i]\n\nprint(f'Сумма {length} элементов ряда {row} \\nравна {_sum}')\n" }, { "alpha_fraction": 0.6727272868156433, "alphanum_fraction": 0.7109090685844421, "avg_line_length": 33.375, "blob_id": "d0063299d67324125560978dcb57954c61016ba6", "content_id": "4dc3c3c9abec513b01ca097f0867fb2ff3c3a868", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 807, "license_type": "no_license", "max_line_length": 72, "num_lines": 16, "path": "/task3_2.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "\"\"\"\n2. Во втором массиве сохранить индексы четных элементов первого массива.\nНапример, если дан массив со значениями 8, 3, 15, 6, 4, 2,\nто во второй массив надо заполнить значениями 1, 4, 5, 6\n(или 0, 3, 4, 5 – если индексация начинается с нуля),\nт.к. именно в этих позициях первого массива стоят четные числа.\n\"\"\"\n\nimport random\n\nSIZE = 10\nsource = [random.randint(0, SIZE * SIZE) for _ in range(SIZE)]\n\nprint(f'В массиве {source}')\neven = [id for id, val in enumerate(source) if val % 2 == 0]\nprint(f'четные числа имеют следующие индексы: {even}')\n" }, { "alpha_fraction": 0.5558194518089294, "alphanum_fraction": 0.5985748171806335, "avg_line_length": 20.049999237060547, "blob_id": "c83db0529d798c15c6315e9388208a6048ac143c", "content_id": "97e9616524e7b7193bf2b7bf8238642865ce0ddd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 575, "license_type": "no_license", "max_line_length": 67, "num_lines": 20, "path": "/task2_2.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "\"\"\"\n2. 
Посчитать четные и нечетные цифры введенного натурального числа.\nНапример, если введено число 34560,\nто у него 3 четные цифры (4, 6 и 0) и 2 нечетные (3 и 5).\n\"\"\"\n\nuser_input = input(\"Введите целое число: \")\n\neven = 0\nodd = 0\n\nfor i in user_input:\n if int(i) % 2:\n odd += 1\n else:\n even += 1\n\nprint(f'всего цифр: {len(user_input)}, '\n f'из них нечетных: {odd}, '\n f'четных: {even} ')\n" }, { "alpha_fraction": 0.3697749078273773, "alphanum_fraction": 0.45016077160835266, "avg_line_length": 15.810811042785645, "blob_id": "50ab159a75565c1175f075dcd2132aa7d59ed2e9", "content_id": "f1ebc93d7f855013749d6294823fdded52be5c17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "no_license", "max_line_length": 80, "num_lines": 37, "path": "/task1_2.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "# 2. Выполнить логические побитовые операции «И», «ИЛИ» и др. над числами 5 и 6.\n# Выполнить над числом 5 побитовый сдвиг вправо и влево на два знака.\n# Объяснить полученный результат.\n\n_and = 5 & 6\n\n_or = 5 | 6\n\n_xor = 5 ^ 6\n\n_sl = 5 << 2\n\n_sr = 5 >> 2\n\nprint(\n '5 & 6 = ',\n '0b{0:b} & 0b{1:b} = 0b{2:b} = '.format(5, 6, _and),\n _and,\n )\n\nprint(\n '5 | 6 = ',\n '0b{0:b} | 0b{1:b} = 0b{2:b} = '.format(5, 6, _or),\n _or,\n )\n\nprint(\n '5 << 1 = ',\n '0b{0:b} << {1:b} = {2:b} = '.format(5, 2, _sl),\n _sl\n)\n\nprint(\n '5 >> 1 = ',\n '0b{0:b} >> {1:b} = {2:b} = '.format(5, 2, _sr),\n _sr\n)\n" }, { "alpha_fraction": 0.5918367505073547, "alphanum_fraction": 0.6054421663284302, "avg_line_length": 35.75, "blob_id": "e0c38979ec9ee3ec4dfba54c827475ed74fe9191", "content_id": "3574054bbcf9f3d7184e5f840363c3566b504cbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1943, "license_type": "no_license", "max_line_length": 76, "num_lines": 36, "path": "/task2_1.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "\"\"\"\n1. Написать программу, которая будет складывать, вычитать,\nумножать или делить два числа. Числа и знак операции вводятся пользователем.\nПосле выполнения вычисления программа не должна завершаться,\nа должна запрашивать новые данные для вычислений.\nЗавершение программы должно выполняться при вводе символа '0'\nв качестве знака операции. Если пользователь вводит неверный знак\n(не '0', '+', '-', '*', '/'), то программа должна сообщать ему об ошибке\nи снова запрашивать знак операции. 
Также сообщать пользователю\nо невозможности деления на ноль, если он ввел 0 в качестве делителя.\n\"\"\"\n\nwhile True:\n mode = input('выберите операцию(+, -, *, /) или введите 0 для выхода: ')\n\n if mode == '0':\n break\n\n if mode not in ['+', '-', '*', '/']:\n print('неверная операция')\n\n else:\n var1 = int(input('введите первый операнд (целое число): '))\n var2 = int(input('введите второй операнд (целое число): '))\n\n if mode == '+':\n print(f'сумма = {var1 + var2}')\n elif mode == '-':\n print(f'разность = {var1 - var2}')\n elif mode == '*':\n print(f'произведение = {var1 * var2}')\n elif mode == '/':\n if var2 == 0:\n print('делить на ноль нельзя')\n else:\n print(f'частное = {var1 / var2}')\n" }, { "alpha_fraction": 0.6289592981338501, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 22.263158798217773, "blob_id": "edf72c5701b55e06a7dd93f0d42845a231f7e0fa", "content_id": "83e77ed56d688c589a3bed2700bbcb9fbe47fc86", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 573, "license_type": "no_license", "max_line_length": 69, "num_lines": 19, "path": "/task3_5.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "\"\"\"\n5. В массиве найти максимальный отрицательный элемент.\nВывести на экран его значение и позицию в массиве.\n\"\"\"\n\nimport random\n\nSIZE = 10\nsource = [random.randint(-1 * SIZE, SIZE) for _ in range(0, SIZE)]\n\nmax_ = -1 * SIZE\nmax_id = 0\nfor id_, val in enumerate(source):\n if val < 0 and val > max_:\n max_ = val\n max_id = id_\n\nprint(f'в массиве {source}')\nprint(f'максимальное отрицательное число: {max_} (позиция {max_id})')\n" }, { "alpha_fraction": 0.5859519243240356, "alphanum_fraction": 0.5988909602165222, "avg_line_length": 19.037036895751953, "blob_id": "5df12c7e24f6cf10e0609f7e83e94073a3153e98", "content_id": "27d246c6b6d79be67faf522b98d437d150b425c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "no_license", "max_line_length": 66, "num_lines": 27, "path": "/task3_3.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "\"\"\"\n3. В массиве случайных целых чисел поменять местами\nминимальный и максимальный элементы.\n\"\"\"\n\nimport random\n\nSIZE = 10\nsource = [random.randint(0, SIZE * SIZE) for _ in range(SIZE)]\n\nmax_ = 0\nmax_id = 0\nmin_ = SIZE * SIZE\nmin_id = 0\n\nfor id_, val in enumerate(source):\n if val > max_:\n max_ = val\n max_id = id_\n if val < min_:\n min_ = val\n min_id = id_\n\nprint(f'исходный массив: {source}, макс.: {max_}, мин.: {min_}')\nsource[min_id] = max_\nsource[max_id] = min_\nprint(f'измененный массив: {source}')\n" }, { "alpha_fraction": 0.6834170818328857, "alphanum_fraction": 0.713567852973938, "avg_line_length": 21.11111068725586, "blob_id": "c35e5b270453231abeaf565dd8314f5b9d17f684", "content_id": "24a7020effd35e8be13e32487c40df128b30617b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 285, "license_type": "no_license", "max_line_length": 77, "num_lines": 9, "path": "/task1_6.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "# 6. Пользователь вводит номер буквы в алфавите. 
Определить, какая это буква.\n\nimport string\n\ni1 = int(input('введите индекс символа:\\n'))\n\ns1 = string.ascii_lowercase[i1 - 1]\n\nprint('символ: ', s1)\n" }, { "alpha_fraction": 0.6382978558540344, "alphanum_fraction": 0.6518375277519226, "avg_line_length": 23.619047164916992, "blob_id": "84265c1ae6d4b05e7e5b0b77d71df43d67d2f4d4", "content_id": "c83540a222a3d60ea0d5c44a068b07aab1ffa993", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 731, "license_type": "no_license", "max_line_length": 80, "num_lines": 21, "path": "/task3_8.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "\"\"\"\n8. Матрица 5x4 заполняется вводом с клавиатуры, кроме последних элементов строк.\nПрограмма должна вычислять сумму введенных элементов каждой строки и записывать\nее в ее последнюю ячейку. В конце следует вывести полученную матрицу.\n\"\"\"\n\nres = []\n\nfor i in range(4):\n sum_ = 0\n row = []\n\n for _ in range(4):\n user_input = int(input(f'Введите целое число для ряда {i + 1}: '))\n row.append(user_input)\n sum_ += user_input\n\n row.append(sum_)\n res.append(row)\n\nprint(*res, sep='\\n')\n" }, { "alpha_fraction": 0.41905856132507324, "alphanum_fraction": 0.4952162206172943, "avg_line_length": 29.38372039794922, "blob_id": "4acdd47e73e30bedd388ebf0673576d34b8cc8d0", "content_id": "04a198dae1a67c876d1c60e9b8b91a860abf3a9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3005, "license_type": "no_license", "max_line_length": 101, "num_lines": 86, "path": "/task5_2.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "# 2. Написать программу сложения и умножения двух шестнадцатеричных чисел.\n# При этом каждое число представляется как массив, элементы которого это цифры числа.\n# Например, пользователь ввёл A2 и C4F. 
Сохранить их как [‘A’, ‘2’] и [‘C’, ‘4’, ‘F’] соответственно.\n# Сумма чисел из примера: [‘C’, ‘F’, ‘1’], произведение - [‘7’, ‘C’, ‘9’, ‘F’, ‘E’].\nfrom collections import deque\n\ndef sum16(num1_, num2_):\n\n table16_10 = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,\n 'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15}\n table10_16 = {val:key for key, val in table16_10.items()}\n\n num1 = deque(num1_)\n num2 = deque(num2_)\n\n if len(num2) > len(num1):\n max_len = len(num2)\n num1.extendleft('0' * (max_len - len(num1)))\n else:\n max_len = len(num1)\n num2.extendleft('0' * (max_len - len(num2)))\n\n sum_ = deque()\n mem = 0\n\n for i in range(1, max_len + 1):\n ans_ = table16_10[num1.pop()] + table16_10[num2.pop()] + mem\n\n mem = 1 if ans_ >= 16 else 0\n ans_ = ans_ + (-16 * mem)\n\n sum_.extendleft(table10_16[ans_])\n\n if mem > 0:\n sum_.appendleft(mem)\n\n return ''.join(sum_)\n\ndef mul16(num1_, num2_):\n table16_10 = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,\n 'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15}\n table10_16 = {val:key for key, val in table16_10.items()}\n\n num1 = deque(num1_)\n num2 = deque(num2_)\n\n if len(num2) > len(num1):\n max_len = len(num2)\n min_len = len(num1)\n op1 = num2\n op2 = num1\n else:\n max_len = len(num1)\n min_len = len(num2)\n op1 = num1\n op2 = num2\n res = ''\n for j in range(1, min_len + 1):\n mul_ = deque()\n mem = 0\n for i in range(1, max_len + 1):\n ans_ = table16_10[op1[-i]] * table16_10[op2[-j]] + mem\n mem = ans_ // 16 if ans_ >= 16 else 0\n ans_ = table10_16[ans_ - (16 * mem)]\n mul_.appendleft(ans_)\n\n if mem > 0:\n mul_.appendleft(str(mem))\n if j > 1:\n mul_.append('0')\n res = sum16(res, ''.join(mul_))\n return res\n\nmode = input('Что будем делать? (\"*\" - умножать / \"+\" - складывать): ')\nif mode == '*':\n print(mul16(\n input('Введите первый множитель в HEX: '),\n input('Введите второй множитель в HEX: ')\n ))\nelse:\n print(sum16(\n input('Введите первое слагаемое в HEX: '),\n input('Введите второе слагаемое в HEX: ')\n ))\n \n# print(mul16('A2', 'C4F'))\n" }, { "alpha_fraction": 0.6813187003135681, "alphanum_fraction": 0.7120879292488098, "avg_line_length": 29.33333396911621, "blob_id": "c67b1268aa896229533ac627a3ae047b7e588acf", "content_id": "b669d5f04da0fa9b0fbad9c329d04380d2640f1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 645, "license_type": "no_license", "max_line_length": 82, "num_lines": 15, "path": "/task1_5.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "# 5. Пользователь вводит две буквы. 
Определить, на каких местах алфавита они стоят\n# и сколько между ними находится букв.\n\nimport string\n\ns1 = input('введите первый символ:\\n').lower()\ns2 = input('введите второй символ:\\n').lower()\n\nsi1 = string.ascii_lowercase.find(s1) + 1\nsi2 = string.ascii_lowercase.find(s2) + 1\nsr = abs(si1 - si2) - 1\n\nprint('индекс первого символа: ', si1)\nprint('индекс второго символа: ', si2)\nprint('символов между ними: ', sr)\n" }, { "alpha_fraction": 0.6162310838699341, "alphanum_fraction": 0.6506189703941345, "avg_line_length": 35.349998474121094, "blob_id": "ec7cd6228ea452f370e6c3d7488ea29c26fcae17", "content_id": "fa4cc7a43618d151d09b18156501b907e88847a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1055, "license_type": "no_license", "max_line_length": 80, "num_lines": 20, "path": "/task1_3.py", "repo_name": "shiftyhead/GB_Algoritm", "src_encoding": "UTF-8", "text": "# 3. По введенным пользователем координатам двух точек вывести уравнение прямой,\n# проходящей через эти точки.\n\nprint('Введите координаты первой точки')\nx1 = int(input('Координата х:'))\ny1 = int(input('Координата y:'))\nprint('Введите координаты второй точки')\nx2 = int(input('Координата х:'))\ny2 = int(input('Координата y:'))\n\n# решим систему уравнений вида y = kx + b\n# b = y1 - (k * x1) выразим b через уравнение первой точки\n# y2 = (k * x2) + y1 - (k * x1) подставим в уравнение второй точки\nk = (y2 - y1) / (x2 - x1) if x2 != x1 else 0 # выразим k и вычислим\nb = y1 - (k * x1) # вычислим b\n\nif k != 0:\n print('уравнение прямой: y = {0:f}x + {1:f}'.format(k, b))\nelse:\n print('уравнение прямой: x = {0:f}'.format(x1))\n" } ]
20
Merigold89/Testing-trial
https://github.com/Merigold89/Testing-trial
926799eb9d5e47d0d31eb62cec2c0956236e3e5e
3c1ebeb337a146c3394f37800929a8172c9dc25a
ee3fc0e21c0058f173dae448713e27ed77330c77
refs/heads/main
2023-04-16T09:57:24.018185
2021-04-30T00:16:52
2021-04-30T00:16:52
335,420,920
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5240963697433472, "alphanum_fraction": 0.525602400302887, "avg_line_length": 22.44444465637207, "blob_id": "fed640c6af9115ad057a0730279267c81d7e823d", "content_id": "3420273959d056d7c1429d730df743d6cf81eaa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 668, "license_type": "no_license", "max_line_length": 109, "num_lines": 27, "path": "/calculator.py", "repo_name": "Merigold89/Testing-trial", "src_encoding": "UTF-8", "text": "class Calculator:\r\n\r\n def __init__(self, first, second):\r\n self.first = first\r\n self.second = second\r\n\r\n def add(self):\r\n \"\"\" Addition \"\"\"\r\n return self.first + self.second # przy zamianie na \"+\" na \"-\" pojawią się błędy w test_calculator.py\r\n\r\n\r\n def multiply(self):\r\n \"\"\" Multiplication \"\"\"\r\n return self.first * self.second\r\n\r\n\r\n def subtract(self):\r\n \"\"\" Subtraction \"\"\"\r\n return self.first - self.second\r\n\r\n\r\n def divide(self):\r\n \"\"\" Division \"\"\"\r\n if self.second == 0:\r\n pass\r\n #return ZeroDivisionError\r\n return self.first / self.second\r\n\r\n\r\n" }, { "alpha_fraction": 0.5348837375640869, "alphanum_fraction": 0.5478637218475342, "avg_line_length": 26.045454025268555, "blob_id": "c4b5e05b3e3a6d73ef07bb36c164bf1dd906bd29", "content_id": "80f2a00551752ea73468d625ef6c2ab188e6cab4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1850, "license_type": "no_license", "max_line_length": 65, "num_lines": 66, "path": "/calculator_test.py", "repo_name": "Merigold89/Testing-trial", "src_encoding": "UTF-8", "text": "import unittest\r\nimport calculator\r\n# aby uruchomić: linia komend: python -m unittest test_calculator\r\n\r\n\r\nclass TestCalculator(unittest.TestCase):\r\n\r\n def setUp(self):\r\n \"\"\"\r\n Is executed before every test method.\r\n \"\"\"\r\n self.calc = calculator.Calculator(5, 1)\r\n print('setUp method')\r\n\r\n def tearDown(self):\r\n \"\"\"\r\n Is executed after every test method.\r\n \"\"\"\r\n self.calc = calculator.Calculator(1, 0)\r\n print('tearDown method')\r\n\r\n def test_add(self):\r\n \"\"\"\r\n Tests for the add() function.\r\n \"\"\"\r\n self.assertEqual(self.calc.add(), 6)\r\n self.calc.first = 8\r\n self.calc.second = 2\r\n self.assertEqual(self.calc.add(), 10)\r\n print('test_add method')\r\n\r\n def test_subtract(self):\r\n \"\"\"\r\n Tests for the subtract() function.\r\n \"\"\"\r\n self.assertEqual(self.calc.subtract(), 4)\r\n self.calc.first = 8\r\n self.calc.second = 2\r\n self.assertEqual(self.calc.subtract(), 6)\r\n print('test_subtract method')\r\n\r\n def test_multiply(self):\r\n \"\"\"\r\n Tests for the multiply() function.\r\n \"\"\"\r\n self.assertEqual(self.calc.multiply(), 5)\r\n self.calc.first = 8\r\n self.calc.second = 2\r\n self.assertEqual(self.calc.multiply(), 16)\r\n print('test_multiply method')\r\n\r\n def test_divide(self):\r\n \"\"\"\r\n Tests for the divide() function.\r\n \"\"\"\r\n self.assertEqual(self.calc.divide(), 5)\r\n self.calc.first = 0\r\n self.calc.second = 5\r\n self.assertEqual(self.calc.divide(), 0)\r\n self.calc = calculator.Calculator(1, 0)\r\n #self.assertEqual(self.calc.divide(), ZeroDivisionError)\r\n print('test_divide method')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main()" }, { "alpha_fraction": 0.6428298354148865, "alphanum_fraction": 0.6703633069992065, "avg_line_length": 48.339622497558594, "blob_id": "6f6bad22846800bb8e99f5af9d551b6150c73c39", 
"content_id": "72a4d59cfb3d7a17f539b1a7afa2cb83a8062b25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2642, "license_type": "no_license", "max_line_length": 137, "num_lines": 53, "path": "/test_calculator_simple.py", "repo_name": "Merigold89/Testing-trial", "src_encoding": "UTF-8", "text": "import unittest\nimport calculator_simple\n# aby uruchomić: linia komend: python -m unittest test_calculator lub dodać ostatni fragment kodu\n# plik test_calclator.py i calculator.py muszą być w jednym folderze\nclass TestCalculator(unittest.TestCase): # a test case for the calculator.py module\n\n def test_add(self):\n \"\"\"\n Tests for the add() function.\n\n assertEqual - sprawdza, czy 2 podane argumenty są sobie równe: 2 zmienne ajko argumenty\n dla funkcji oraz oczekiwany wynik, dodatkowo komentarz w sypadku fail testu\n \"\"\"\n self.assertEqual(calculator_simple.add(6, 4), 10, 'Error when adding two positive numbers')\n self.assertEqual(calculator_simple.add(6, -4), 2, 'Error when adding two positive and negative numbers')\n self.assertEqual(calculator_simple.add(-6, 4), -2, 'Error when adding two negative and positive numbers')\n self.assertEqual(calculator_simple.add(-6, -4), -10, 'Error when adding two negative numbers')\n\n def test_multiply(self):\n \"\"\"\n Tests for the multiply() function.\n \"\"\"\n self.assertEqual(calculator_simple.multiply(6, 4), 24)\n self.assertEqual(calculator_simple.multiply(6, -4), -24)\n self.assertEqual(calculator_simple.multiply(-6, 4), -24)\n self.assertEqual(calculator_simple.multiply(-6, -4), 24)\n\n def test_subtract(self):\n \"\"\"\n Tests for the subtract() function.\n \"\"\"\n self.assertEqual(calculator_simple.subtract(6, 4), 2)\n self.assertEqual(calculator_simple.subtract(6, -4), 10)\n self.assertEqual(calculator_simple.subtract(-6, 4), -10)\n self.assertEqual(calculator_simple.subtract(-6, -4), -2)\n\n def test_divide(self):\n \"\"\"\n Tests for the divide() function.\n \"\"\"\n self.assertEqual(calculator_simple.divide(10, 2), 5)\n self.assertEqual(calculator_simple.divide(10, -2), -5)\n self.assertEqual(calculator_simple.divide(-10, 2), -5)\n self.assertEqual(calculator_simple.divide(-10, -2), 5)\n\n self.assertRaises(ValueError, calculator_simple.divide, 5, 0) # 1 metoda, ValueError jest wartością oczekiwaną, potem argumenty\n with self.assertRaises(ValueError): # 2 metoda wywołujemy błąd\n calculator_simple.divide(5, 0)\n #self.assertEqual(calculator.divide(10, 0), 0) # zamiast litery F zwóci literę E,\n #test zgłasza wyjątek inny niż błąd AssertionError (ValueError) - nieprawidłowość jest, ale nie uważa się, aby test został oblany\n\nif __name__ == \"__main__\": # jeśli nie chcemy odpalać z linii komend\n unittest.main()\n" }, { "alpha_fraction": 0.5979093909263611, "alphanum_fraction": 0.608013927936554, "avg_line_length": 38.43661880493164, "blob_id": "7c34c89630298ee6af37ade1b6018e63561a221d", "content_id": "d83ad488de48fa1cc638b8fa66f4ebb07c3d406b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5746, "license_type": "no_license", "max_line_length": 128, "num_lines": 142, "path": "/calculator_GUI.py", "repo_name": "Merigold89/Testing-trial", "src_encoding": "UTF-8", "text": "# from __future__ import unicode_literal # obsługa polskich liter\r\nimport calculator\r\nfrom PyQt5.QtWidgets import QApplication, QWidget # importujemy klasy z modulu\r\nfrom PyQt5.QtGui import QIcon\r\nfrom PyQt5.QtWidgets import QLabel, 
QGridLayout\r\nfrom PyQt5.QtWidgets import QLineEdit, QPushButton, QHBoxLayout # edit line, button\r\nfrom PyQt5.QtWidgets import QMessageBox # creates message boxes\r\nfrom PyQt5.QtCore import Qt # contains constants\r\n\r\n\r\nclass calculator_window(QWidget): # inheriting class\r\n    def __init__(self, parent=None): # returns the parent class and calls its constructor\r\n        super().__init__(parent)\r\n\r\n        self.interfejs() # call the interface builder method\r\n\r\n    def interfejs(self):\r\n        \"\"\"\r\n        - labels\r\n        - assigning the widgets to the grid layout\r\n        - assigning the created layout to the window\r\n        - widgets\r\n        - fields for entering data\r\n        - buttons for the mathematical operations\r\n        - binding the buttons to the operation functions\r\n        \"\"\"\r\n\r\n        label1 = QLabel(\"Number 1:\", self) # labels\r\n        label2 = QLabel(\"Number 2:\", self)\r\n        label3 = QLabel(\"Result:\", self)\r\n\r\n        layoutT = QGridLayout() # QGridLayout - positions the widgets in a grid layout\r\n        layoutT.addWidget(label1, 0, 0) # addWidget - attaches the label to the application window\r\n        layoutT.addWidget(label2, 0, 1)\r\n        layoutT.addWidget(label3, 0, 2)\r\n\r\n        self.setLayout(layoutT) # assign the created layout to the application window\r\n\r\n        self.number1Edt = QLineEdit() # single-line edit fields\r\n        self.number2Edt = QLineEdit()\r\n        self.resultEdt = QLineEdit()\r\n\r\n        self.resultEdt.setReadOnly(True) # make the text field read-only (assigning to .readonly has no effect in PyQt)\r\n        self.resultEdt.setToolTip(\"\") # sets the tooltip text\r\n\r\n        layoutT.addWidget(self.number1Edt, 1, 0)\r\n        layoutT.addWidget(self.number2Edt, 1, 1)\r\n        layoutT.addWidget(self.resultEdt, 1, 2)\r\n\r\n        addBtn = QPushButton(\"&Add\", self) # buttons\r\n        subtractBtn = QPushButton(\"&Subtract\", self)\r\n        divideBtn = QPushButton(\"&Divide\", self)\r\n        multiplyBtn = QPushButton(\"&Multiply\", self)\r\n        exitBtn = QPushButton(\"&Exit\", self)\r\n        exitBtn.resize(exitBtn.sizeHint()) # suggested size of the object\r\n\r\n        exitBtn.clicked.connect(self.exit) # bind the \"Exit\" button to the exit method\r\n        addBtn.clicked.connect(self.operation)\r\n        subtractBtn.clicked.connect(self.operation)\r\n        multiplyBtn.clicked.connect(self.operation)\r\n        divideBtn.clicked.connect(self.operation)\r\n\r\n        layoutH = QHBoxLayout() # horizontal layout\r\n        layoutH.addWidget(addBtn)\r\n        layoutH.addWidget(subtractBtn)\r\n        layoutH.addWidget(divideBtn)\r\n        layoutH.addWidget(multiplyBtn)\r\n\r\n        layoutT.addLayout(layoutH, 2, 0, 1, 3) # grid placement of the buttons: what to insert, then the insertion rows and columns\r\n        layoutT.addWidget(exitBtn, 3, 0, 1, 3)\r\n\r\n        self.setGeometry(150, 150, 300, 100) # sets the position of the application window\r\n        self.setWindowIcon(QIcon('calculator_icon.png')) # setWindowIcon - creates the widget's calculator icon\r\n        self.setWindowTitle(\"Simple calculator\") # setWindowTitle - the program's title bar text\r\n        self.show()\r\n\r\n    def exit(self):\r\n        \"\"\"\r\n        Exit the program.\r\n        \"\"\"\r\n        self.close()\r\n\r\n    def closeEvent(self, event): # the function name is very important! 
- do not change it\r\n        \"\"\"\r\n        Dialog box \"Are you sure you want to close?\" shown when exiting the program.\r\n        \"\"\"\r\n\r\n        answer = QMessageBox.question( # create a dialog box\r\n            self, 'message', # window header\r\n            \"Are you sure you want to close?\", # the question shown in the window\r\n            QMessageBox.Yes | QMessageBox.No, QMessageBox.No) # combination of the \"Yes\" and \"No\" buttons plus the default button\r\n\r\n        if answer == QMessageBox.Yes:\r\n            event.accept()\r\n        else:\r\n            event.ignore()\r\n\r\n    def keyPressEvent(self, e): # the function name is very important! - do not change it\r\n        \"\"\"\r\n        Close the program with the ESC button.\r\n        \"\"\"\r\n        if e.key() == Qt.Key_Escape:\r\n            self.close()\r\n\r\n    def operation(self):\r\n        \"\"\"\r\n        Entered data is sent to the calculation module (calculator.py) and the result is received back.\r\n        \"\"\"\r\n        user = self.sender()\r\n\r\n\r\n        try: # if the entered values cannot be converted to float, display an error\r\n            number1 = float(self.number1Edt.text())\r\n            number2 = float(self.number2Edt.text())\r\n            result = \"\"\r\n            self.calc = calculator.Calculator(number1, number2)\r\n\r\n            if user.text() == \"&Add\":\r\n                result = self.calc.add()\r\n            elif user.text() == \"&Subtract\":\r\n                result = self.calc.subtract()\r\n            elif user.text() == \"&Multiply\":\r\n                result = self.calc.multiply()\r\n            else: # division\r\n                try:\r\n                    result = round(self.calc.divide(), 9)\r\n                except ZeroDivisionError:\r\n                    QMessageBox.critical(\r\n                        self, \"Error\", \"Can not divide by zero!\")\r\n                    return\r\n\r\n            self.resultEdt.setText(str(result)) # display the result in the \"Result\" field\r\n\r\n        except ValueError:\r\n            QMessageBox.warning(self, \"Error\", \"Incorrect data\", QMessageBox.Ok)\r\n\r\nif __name__ == '__main__':\r\n    import sys\r\n\r\n    app = QApplication(sys.argv) # represents the application\r\n    window = calculator_window()\r\n    sys.exit(app.exec_()) # the program's main loop" } ]
4
jiangchb/pipelines
https://github.com/jiangchb/pipelines
6f346635c7fb7d7e04636a7a583d5b5d7c5c47f1
803e158ed46213fb5ccbe6ba7894f26bc4392aae
e1bffd2e8e51c041e99321cac52374d2a8d599bc
refs/heads/master
2021-01-11T18:50:36.954652
2015-03-10T21:49:22
2015-03-10T21:49:22
79,637,331
0
1
null
2017-01-21T09:48:45
2015-03-10T21:49:27
2015-03-10T21:49:27
null
[ { "alpha_fraction": 0.5873813629150391, "alphanum_fraction": 0.624790608882904, "avg_line_length": 23.875, "blob_id": "9b8ddf5fe3e7cd0c03607fa705368dcbd237fb68", "content_id": "ebdc22b782b72fe0e916adbe82ed195d72922f98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1791, "license_type": "no_license", "max_line_length": 135, "num_lines": 72, "path": "/SubsampleFastq/subsample.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/bash -e\n\nDIR=$(dirname $0)\nVERSION=\"0.0.1\"\n\nUSAGEMSG=\"usage: $(basename $0) -s FASTQSUFFIX -v FASTQ1 FASTQ2 RECORDS OUTDIR\n\nWill overwrite the \nSubsample paired-end fastq files:\n\nHiC assuming merged bam and properly sets flag\n~/subsample.sh GMall_Ncol_R1.fastq.gz GMall_Ncol_R2.fastq.gz 10000 ~/tmp/\n\nAuthor: Fabian Buske\ninspired from https://www.biostars.org/p/76791/\nVersion: $VERSION\n\n* FASTQ1 - read1 fastq file\n* FASTQ2 - read2 fastq file\n* RECORDS - number of records to subsample\n* FASTQSUFFIX - file suffix e.g. fastq or fastq.gz\n* OUTDIR - where to put the data\n\"\n\n[ $# -lt 3 ] && echo \"$USAGEMSG\" >&2 && exit 1\nOUTDIR=\nFASTQSUFFIX=\n\nwhile getopts \"o:s:i:v\" opt;\ndo\n\tcase ${opt} in\n\t s) FASTQSUFFIX=\"$OPTARG\";;\n v) VERBOSE=\"--verbose\";;\n \\?) print >&2 \"$0: error - unrecognized option $1\"\n exit 1;;\n esac\ndone\n\nshift $(($OPTIND-1))\nFASTQ1=$1\nFASTQ2=$2\nRECORDS=$3\nOUTDIR=$4\n\nmkdir -p $OUTDIR\n\n#is ziped ?\nCAT=\"cat\"\nif [[ ${FASTQ1##*.} == \"gz\" ]]; then \n CAT=\"zcat\";\nelif [[ ${FASTQ1##*.} == \"bz2\" ]]; \n then CAT=\"bzcat\"; \nfi\n\nif [ -z \"$OUTDIR\" ]; then\n OUTDIR=$(dirname $FASTQ1)\nfi\n\nSAMPLE1=${FASTQ1##*/}\nSAMPLE2=${FASTQ2##*/}\necho $OUTDIR/${SAMPLE1/%$FASTQSUFFIX/.tmp}\n\n\npaste <($CAT $FASTQ1) <($CAT $FASTQ2) |\\\nawk '{ printf(\"%s\",$0); n++; if(n%4==0) { printf(\"\\n\");} else { printf(\"\\t\\t\");} }' |\\\nshuf -n $RECORDS |\\\nsed 's/\\t\\t/\\n/g' |\\\nawk -F'\\t' -v FN1=$OUTDIR/${SAMPLE1/%$FASTQSUFFIX/.tmp} -v FN2=$OUTDIR/${SAMPLE2/%$FASTQSUFFIX/.tmp} '{print $1 > FN1; print $2 > FN2}'\ngzip -9 -c $OUTDIR/${SAMPLE1/%$FASTQSUFFIX/.tmp} > $OUTDIR/${SAMPLE1/%$FASTQSUFFIX/fastq.gz}\ngzip -9 -c $OUTDIR/${SAMPLE2/%$FASTQSUFFIX/.tmp} > $OUTDIR/${SAMPLE2/%$FASTQSUFFIX/fastq.gz}\n\nrm $OUTDIR/${SAMPLE1/%$FASTQSUFFIX/.tmp} $OUTDIR/${SAMPLE2/%$FASTQSUFFIX/.tmp}\n" }, { "alpha_fraction": 0.6621417999267578, "alphanum_fraction": 0.6764705777168274, "avg_line_length": 35.83333206176758, "blob_id": "44b25ba4bef205414a3e9fa17a77b0ac60f5f519", "content_id": "37583961dc47e919e064b3cb6f6d5e1b274d1754", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 1326, "license_type": "no_license", "max_line_length": 120, "num_lines": 36, "path": "/Chromablocks/chromablocks.R", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "library(GenomicFeatures)\nlibrary(Repitools)\nlibrary(chipseq)\nlibrary(BSgenome.Hsapiens.UCSC.hg19)\nlibrary(multicore)\nlibrary(rtracklayer)\noptions(cores=4)\n\n# expects two bam files (signal, input) and run chromablocks \n# expects HG19 assembly\n\nargs <- commandArgs(trailingOnly = TRUE)\n\nchip_arg<-args[1]\ninput_arg<-args[2]\noutput_arg<-args[3]\n\nchromacall <- function(signal, input, output) \n{\n\tip_name <- sub(\"[.][^.]*$\", \"\", basename(signal))\n\tin_name <- sub(\"[.][^.]*$\", \"\", basename(input))\n\trs<-BAM2GRangesList(c(\"ip\"=signal, \"input\"=input))\n\n\tchroma.sm <- ChromaBlocks(rs.ip=rs[1], rs.input=rs[2], 
Hsapiens, 1:24, preset=\"small\")\n save(chroma.sm,file=file.path(output, paste(paste(ip_name,in_name,sep='-'),\"sm\",\"rda\",sep='.')))\t\n\tchroma.sm.regions <- regions(chroma.sm)\n\texport(chroma.sm.regions, con=file.path(output,paste(paste(ip_name,in_name,sep='-'),\"sm\",\"bed\",sep='.')), format=\"bed\")\n\n\tchroma.lg <- ChromaBlocks(rs.ip=rs[1], rs.input=rs[2], Hsapiens, 1:24, preset=\"large\")\n save(chroma.lg,file=file.path(output, paste(paste(ip_name,in_name,sep='-'),\"lg\",\"rda\",sep='.')))\n\tchroma.lg.regions <- regions(chroma.lg)\n\texport(chroma.lg.regions, con=file.path(output,paste(paste(ip_name,in_name,sep='-'),\"lg\",\"bed\",sep='.')), format=\"bed\")\n\n}\n\nchromacall(signal=chip_arg, input=input_arg, output=output_arg)\n" }, { "alpha_fraction": 0.5785076022148132, "alphanum_fraction": 0.590929388999939, "avg_line_length": 48.290748596191406, "blob_id": "80762e556a1480776a2350f9cf1079e4f9c49bad", "content_id": "ca1f4da5fba37a1b77ba52aa7292a03f3751ff41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 22380, "license_type": "no_license", "max_line_length": 310, "num_lines": 454, "path": "/segway/run_segway.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\nUSAGEMSG=\"usage: $(basename $0) CONFIGFILE\n\nStarts the segway pipeline given the config file\n\nAuthor: Fabian Buske\n\nRequirements (modules):\n\tgi/gcc/4.8.2\n\tfabbus/segway_gbr/1.2.0 (with segtools 1.1.7) \n\tBEDtools (genomeCoverageBed) in path\n\tSamtools gi/samtools\n\n* CONFIGFILE - the config file describing the joba\n* -1 step 1 - collect the bam data from gagri\n* -2 step 2 - groom the bam data into bedGraph format\n* -3 step 3 - put the data as tracks into a genomedata archive\n* -4 step 4 - train the segway model\n* -5 step 5 - predict using the segway model\n* -6 step 6 - evaluate the output using segtools\n* -f force - overwrite existing results\n* -a armed - trigger the jobs after writing the scripts\n* -v - print progress information (verbose).\n\"\n\n\nPIPELINEDIR=`dirname $0`\nVERSION=\"0.0.2\"\n\nDO_TRANSFERDATA=\"\"\nDO_CONVERTDATA2BEDGRAPH=\"\"\nDO_GENERATEARCHIVE=\"\"\nDO_TRAINSEGWAY=\"\"\nDO_PREDICTSEGWAY=\"\"\nDO_EVALUATE=\"\"\nCLOBBER=\"\"\n\nARMED=\"FALSE\"\nOVERWRITEALL=\"FALSE\"\n\n[ $# -lt 1 ] && echo \"$USAGEMSG\" >&2 && exit 1\n\nwhile getopts \"123456afv\" opt;\ndo\n case ${opt} in\n 1) DO_TRANSFERDATA=\"TRUE\";;\n 2) DO_CONVERTDATA2BEDGRAPH=\"TRUE\";;\n 3) DO_GENERATEARCHIVE=\"TRUE\";;\n 4) DO_TRAINSEGWAY=\"TRUE\";;\n 5) DO_PREDICTSEGWAY=\"TRUE\";;\n 6) DO_EVALUATE=\"TRUE\";;\n a) ARMED=\"TRUE\";;\n f) OVERWRITEALL=\"TRUE\";;\n v) VERBOSE=\"--verbose\";;\n \\?) 
print >&2 \"$0: error - unrecognized option $1\"\n exit 1;;\n esac\ndone\n\nshift $(($OPTIND-1))\nCONFIG=$1\n\nsource ${CONFIG}\n\nif [ \"$OVERWRITEALL\" = \"TRUE\" ];then\n\tCLOBBER=\"--clobber \"\nfi \n\nif [ -d ${TARGETDIR}/ ]; then\n\techo \"[WARN] target directory exists already\"\nelse\n\tmkdir -p ${TARGETDIR}/\nfi\n\n#\n# data folder (to put or source the data from)\n# Important: add final /\n#\nSEGWAY_DATA=${TARGETDIR}/data/\nSEGWAY_BIN=${TARGETDIR}/bin/\nSEGWAY_QOUT=${TARGETDIR}/qout/\nSEGWAY_RESULT=${TARGETDIR}/result_${LABELS}/\nSEGWAY_TRAIN=${SEGWAY_RESULT}/train-${EXPERIMENT}\nSEGWAY_PREDICT=${SEGWAY_RESULT}/predict-${EXPERIMENT}${PREDICTION}\n\n# some housekeeping\nmkdir -p $SEGWAY_DATA $SEGWAY_BIN $SEGWAY_QOUT $SEGWAY_RESULT\ncp ${REGIONS} ${SEGWAY_DATA}\nTRAIN_REGIONS=${SEGWAY_DATA}$(basename ${REGIONS})\n\nmodule load gi/ucsc_utils fabbus/segway_gbr gi/samtools\n##\n## collect the data (tracks) from gagri\n##\nif [ -n \"$DO_TRANSFERDATA\" ]; then\n\t# get all files\n\techo \"echo 'collect data tracks from Gagri'\" > ${SEGWAY_BIN}/1_cdata.sh\n\techo \"module load gi/samtools\" > ${SEGWAY_BIN}/1_cdata.sh\n\twhile IFS=$'\\t' read -r -a DATA; do\n FILES_SOURCE=${DATA[2]}\n for F in $(echo ${DATA[3]} | tr ',' '\\n'); do\n echo \"echo 'datafile ${F}${DATA[4]}'\" >> ${SEGWAY_BIN}/1_cdata.sh\n if [[ ! -f ${SEGWAY_DATA}/${DATA[4]} ]] || [ $OVERWRITEALL == 'TRUE' ]; then \n echo \"[ ! -f ${SEGWAY_DATA}/$F${DATA[4]} ] && smbclient //Gagri/diverse -A ~/.smbclient -c 'cd ${FILES_SOURCE}; get ${F}${DATA[4]}' && mv ${F}${DATA[4]} ${SEGWAY_DATA}\" >> ${SEGWAY_BIN}/1_cdata.sh\n if [[ \"${DATA[4]}\" =~ \".bam\" ]] && [[ ! -f ${SEGWAY_DATA}/$F${DATA[4]}.bai ]]; then\n \t echo \"[ ! -f ${SEGWAY_DATA}/$F${DATA[4]}.bai ] && smbclient //Gagri/diverse -A ~/.smbclient -c 'cd ${FILES_SOURCE}; get ${F}${DATA[4]}.bai' && mv ${F}${DATA[4]}.bai ${SEGWAY_DATA}\" >> ${SEGWAY_BIN}/1_cdata.sh\n \t echo \"[ ! -f ${SEGWAY_DATA}/$F${DATA[4]}.bai ] && samtools index ${SEGWAY_DATA}/$F${DATA[4]}\" >> ${SEGWAY_BIN}/1_cdata.sh\n \t fi\n\t fi\n done\n done < $EXPERIMENTS\n\n\tchmod 777 ${SEGWAY_BIN}/1_cdata.sh\n\tif [ $ARMED = \"TRUE\" ]; then\n ${SEGWAY_BIN}/1_cdata.sh\n\tfi\nfi\n\n## \n## transform the bam data into bedgraph\n##\nif [ -n \"$DO_CONVERTDATA2BEDGRAPH\" ]; then\n\tif [ ! -d ${SEQDIR} ] || [ ! -d ${WIGGLER_UMAPDIR} ]; then\n\t\techo \"sequence dir or umap dir is missing/invalid\"\n\t\techo \"seqDir: ${SEQDIR}\"\n\t\techo \"umapDIR:${WIGGLER_UMAPDIR}\"\n\t\texit 1\n\tfi\n\t\n\tmodule load gi/bedtools gi/samtools fabbus/wiggler/2.0 gi/ucsc_utils gi/pigz\n\techo \"module load gi/bedtools gi/samtools fabbus/wiggler/2.0 gi/ucsc_utils gi/pigz\" > ${SEGWAY_BIN}/2_tdata.sh\n#\techo \"echo 'get chromosome sizes for ${GENOME}'\" >> ${SEGWAY_BIN}/2_tdata.sh\n\n\twhile IFS=$'\\t' read -r -a DATA; do\n if [[ \"${DATA[4]}\" =~ \".bam\" ]]; then\n FRAGMENTS=\"\"\n INPUTS=\"\"\n REPNAMES=\"\"\n for REPLICA in $(echo ${DATA[3]} | tr ',' '\\n'); do\n \t [ ! 
-f ${SEGWAY_DATA}${REPLICA}${DATA[4]} ] && echo \"[ERROR] file not found: ${SEGWAY_DATA}${REPLICA}${DATA[4]}\" && exit 1\n \t\tINPUTS=\"${INPUTS} -i=${SEGWAY_DATA}${REPLICA}${DATA[4]}\"\n if [[ $(samtools view ${SEGWAY_DATA}${REPLICA}${DATA[4]} | awk '{print length($10)}' | head -1000 | sort -u | head -n 1) -ge 100 ]]; then\t\n echo \"using $WIGGLER_UMAPDIR_ge100\"\n WIGGLER_UMAP=$WIGGLER_UMAPDIR_ge100\n else\n echo \"using $WIGGLER_UMAPDIR_lt100\"\n WIGGLER_UMAP=$WIGGLER_UMAPDIR_lt100\n fi\n done\n for FRAGSIZE in $(echo ${DATA[5]} | tr ',' '\\n'); do\n FRAGMENTS=\"${FRAGMENTS} -l=${FRAGSIZE}\"\n done\n \n \t\tREPNAMES=${DATA[0]}\"_\"${DATA[1]}\n echo $REPNAMES\n \n if [ ! -f ${SEGWAY_DATA}${REPNAMES}.bg.gz ] || [ \"$OVERWRITEALL\" = \"TRUE\" ]; then\n [ -f ${SEGWAY_QOUT}TrDa-${REPNAMES}.out ] && rm ${SEGWAY_QOUT}TrDa-${REPNAMES}.out\n [ -f ${SEGWAY_DATA}${REPNAMES}.bg.gz ] && rm ${SEGWAY_DATA}${REPNAMES}.bg.gz\n echo '#!/bin/bash' > ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n echo 'echo job_id $JOB_ID startdata $(date)' >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n echo \"echo convert ${REPNAMES} to bedGraph using wiggler\" >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n # wiggler\n \n echo \"align2rawsignal -of=bg ${INPUTS} ${FRAGMENTS} -s=${SEQDIR} -u=${WIGGLER_UMAP} -n=5 -v=${SEGWAY_QOUT}wiggler-${REPNAMES}.log -k=tukey -w=${DATA[6]} -o=${SEGWAY_DATA}${REPNAMES}.bg\" >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh \n##\t\t apply asinh transformation to all signal values (done via (--distribution=asinh_norm) by default\n#\t\techo \"[ -f -c ${SEGWAY_DATA}${REPNAMES}.bg.gz ] && rm -c ${SEGWAY_DATA}${REPNAMES}.bg.gz\" >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n#\t\techo \"cat ${SEGWAY_DATA}${REPNAMES}.bg | awk 'function asinh(x){return log(x + sqrt(x*x +1))}{OFS=\\\"\\\\t\\\";print \\$1,\\$2,\\$3,asinh(\\$4)}' | pigz -9 -c > ${SEGWAY_DATA}${REPNAMES}.bg.gz \" >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n#\t\techo \"rm ${SEGWAY_DATA}${REPNAMES}.bg\" >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n\t\techo \"pigz -9 ${SEGWAY_DATA}${REPNAMES}.bg\" >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n echo 'echo job_id $JOB_ID ending $(date)' >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n chmod 777 ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n \n # submit\n echo \"qsub -pe smp 4 -V -cwd -l h_rt=24:00:00 -j y -m e -S /bin/bash -o ${SEGWAY_QOUT}TrDa-${REPNAMES}.out -N TrDa-${REPNAMES} ${SEGWAY_BIN}/tdata${REPNAMES}.sh\" >> ${SEGWAY_BIN}/2_tdata.sh\n fi\n elif [[ \"${DATA[4]}\" =~ \".bw\" ]]; then\n if [ \"$(echo ${DATA[3]} | tr ',' '\\n' | wc -l | cut -f 1)\" -gt 1 ]; then\n echo \"[ERROR] replicates for bigwigs not supported\"\n exit 1\n fi\n\n \t REPNAMES=${DATA[0]}\"_\"${DATA[1]}\n echo $REPNAMES\n \tif [ ! 
-f ${SEGWAY_DATA}${REPNAMES}.bg.gz ] || [ \"$OVERWRITEALL\" = \"TRUE\" ]; then\n [ -f ${SEGWAY_QOUT}TrDa-${REPNAMES}.out ] && rm ${SEGWAY_QOUT}TrDa-${REPNAMES}.out\n echo '#!/bin/bash' > ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n echo 'echo job_id $JOB_ID startdata $(date)' >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n echo \"echo convert ${REPNAMES} bigwig to bedGraph \" >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n # wiggler\n echo \"bigWigToBedGraph ${SEGWAY_DATA}${DATA[3]}${DATA[4]} ${SEGWAY_DATA}${REPNAMES}.bg\" >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh \n echo \"pigz -9 ${SEGWAY_DATA}${REPNAMES}.bg\" >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh \n echo 'echo job_id $JOB_ID ending $(date)' >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n chmod 777 ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n \n # submit\n echo \"qsub -pe smp 4 -V -cwd -l h_rt=24:00:00 -j y -m e -S /bin/bash -o ${SEGWAY_QOUT}TrDa-${REPNAMES}.out -N TrDa-${REPNAMES} ${SEGWAY_BIN}/tdata${REPNAMES}.sh\" >> ${SEGWAY_BIN}/2_tdata.sh\n fi\n\n elif [[ \"${DATA[4]}\" =~ \".bg\" ]]; then\n if [ \"$(echo ${DATA[3]} | tr ',' '\\n' | wc -l | cut -f 1)\" -gt 1 ]; then\n echo \"[ERROR] replicates for begraphs not supported\"\n exit 1\n fi\n \t\tREPNAMES=${DATA[0]}\"_\"${DATA[1]}\n echo $REPNAMES\n \tif [ ! -f ${SEGWAY_DATA}${REPNAMES}.bg.gz ] || [ \"$OVERWRITEALL\" = \"TRUE\" ]; then\n [ -f ${SEGWAY_QOUT}TrDa-${REPNAMES}.out ] && rm ${SEGWAY_QOUT}TrDa-${REPNAMES}.out\n echo '#!/bin/bash' > ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n echo 'echo job_id $JOB_ID startdata $(date)' >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n echo \"echo convert ${REPNAMES} bigwig to bedGraph \" >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n # wiggler\n echo \"pigz -9 -c ${SEGWAY_DATA}${DATA[3]}${DATA[4]} > ${SEGWAY_DATA}${REPNAMES}.bg.gz\" >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh \n echo 'echo job_id $JOB_ID ending $(date)' >> ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n chmod 777 ${SEGWAY_BIN}/tdata${REPNAMES}.sh\n \n # submit\n echo \"qsub -pe smp 4 -V -cwd -l h_rt=24:00:00 -j y -m e -S /bin/bash -o ${SEGWAY_QOUT}TrDa-${REPNAMES}.out -N TrDa-${REPNAMES} ${SEGWAY_BIN}/tdata${REPNAMES}.sh\" >> ${SEGWAY_BIN}/2_tdata.sh\n fi\n fi\n done < $EXPERIMENTS\n\n\tchmod 777 ${SEGWAY_BIN}/2_tdata.sh\n\tif [ $ARMED = \"TRUE\" ]; then\n \t${SEGWAY_BIN}/2_tdata.sh\n fi\nfi\n\n##\n## generate the separate genome archives for the tissues.\n## The archieve is then annotated with the bedgraph data \n##\nif [ -n \"$DO_GENERATEARCHIVE\" ]; then\n mkdir -p ${SEGWAY_DATA}${EXPERIMENT}.genomedata\n\tif [ ! -n \"$CHROMSIZES\" ];then\n\t\techo \"[ERROR] Chromosome sizes not given: $CHROMSIZES\"\n\t\texit 1\n\tfi\n\t## load module\n\tmodule load fabbus/segway_gbr\n\techo \"module load fabbus/segway_gbr\" > ${SEGWAY_BIN}/3_gdata.sh\n\techo \"[ -f ${SEGWAY_QOUT}/GnDt-${EXPERIMENT}.out ] && rm ${SEGWAY_QOUT}/GnDt-${EXPERIMENT}.out\" >> ${SEGWAY_BIN}/3_gdata.sh\n\n for CHR in $(cut -f 1 ${CHROMSIZES}); do\n if [ ! 
-f $SEQDIR/$CHR$FASTASUFFIX ]; then \n echo \"[ERROR] chr file not found: $SEQDIR/$CHR$FASTASUFFIX\"\n exit 1\n fi\n echo $SEQDIR/$CHR$FASTASUFFIX\n \n \techo 'echo job_id $JOB_ID startdata $(date)' > ${SEGWAY_BIN}/gdata${EXPERIMENT}${CHR}.sh\n \t# genomedata-load call\n \techo \"echo '*** create genomedata archive'\" >> ${SEGWAY_BIN}/gdata${EXPERIMENT}${CHR}.sh\n \techo \"genomedata-load -s $SEQDIR/$CHR$FASTASUFFIX \\\\\" >> ${SEGWAY_BIN}/gdata${EXPERIMENT}${CHR}.sh\n \t# add the -t <ID>=<FILE> sections for all tracks\n\n\twhile IFS=$'\\t' read -r -a DATA; do\n REPNAMES=${DATA[0]}\"_\"${DATA[1]}\n if [[ -n \"$USE_ALL_TRACK_DATA\" ]] || [[ \"${DATA[1]}\" == \"$EXPERIMENT\" ]]; then\n echo \"[USE ] $REPNAMES\"\n# b=$(basename $f)\n# \tarrIN=(${REPNAMES//./ })\n echo \"-t\" $REPNAMES=${SEGWAY_DATA}${REPNAMES}.bg.gz\" \\\\\" >> ${SEGWAY_BIN}/gdata${EXPERIMENT}${CHR}.sh\n else\n \t echo \"[SKIP] $REPNAMES\"\n \t fi\n done < $EXPERIMENTS\n # add dinucleotide\n# echo \"-t dinucleotide \\\\\" >> ${SEGWAY_BIN}/gdata${EXPERIMENT}${CHR}.sh\n \n \techo \"${SEGWAY_DATA}${EXPERIMENT}.genomedata\" >> ${SEGWAY_BIN}/gdata${EXPERIMENT}${CHR}.sh\n \techo 'echo job_id $JOB_ID ending $(date)' >> ${SEGWAY_BIN}/gdata${EXPERIMENT}${CHR}.sh\n \tchmod 777 ${SEGWAY_BIN}/gdata${EXPERIMENT}${CHR}.sh\n sed -i 's|//*|/|g' ${SEGWAY_BIN}/gdata${EXPERIMENT}${CHR}.sh\n \t#submit\n \techo \"qsub -pe smp 1 -V -cwd -b y -j y -o ${SEGWAY_QOUT}GnDt-${EXPERIMENT}-${CHR}.out -N GnDt-${EXPERIMENT}-${CHR} ${SEGWAY_BIN}/gdata${EXPERIMENT}${CHR}.sh\" >> ${SEGWAY_BIN}/3_gdata.sh\n done \n\tchmod 777 ${SEGWAY_BIN}/3_gdata.sh\n\n\tif [ $ARMED = \"TRUE\" ]; then\n ${SEGWAY_BIN}/3_gdata.sh\n\tfi\nfi\n\n##\n## train Seqway models\n##\nif [ -n \"$DO_TRAINSEGWAY\" ]; then\n\n module load gi/bedtools\n echo \"module load fabbus/segway_gbr\" > ${SEGWAY_BIN}/4_train.sh\n echo \"[ -f ${SEGWAY_QOUT}SgTrn-${EXPERIMENT}.out ] && rm ${SEGWAY_QOUT}SgTrn-${EXPERIMENT}.out\" >> ${SEGWAY_BIN}/4_train.sh\n echo \"[ -d ${SEGWAY_TRAIN} ] && rm -r ${SEGWAY_TRAIN}\" >> ${SEGWAY_BIN}/4_train.sh\n\n mkdir -p $SEGWAY_DATA/tmp/\n if [ -n \"$EXCLUDABLE\" ]; then\n EXLCUDECOORDS=\"--exclude-coords=$EXCLUDABLE\"\n fi\n \n OPTIONS=\"$MODEL_ADDPARAM --include-coords=$TRAIN_REGIONS --num-labels=${LABELS} $EXLCUDECOORDS $CLUSTEROPT --num-instances=${INSTANCES} ${CLOBBER} ${SEGWAY_TRAIN_ADDPARAM}\"\n echo \"echo '*** train segway'\" >> ${SEGWAY_BIN}/segtrain${EXPERIMENT}.sh\n echo \"segway $OPTIONS \\\\\"> ${SEGWAY_BIN}/segtrain${EXPERIMENT}.sh \n if [[ -n \"$USE_ALL_TRACK_DATA\" ]]; then\n for e in $(cut -f 1 $EXPERIMENTS | sort -u); do\n REPNAMES=$(fgrep -w \"$e\" $EXPERIMENTS | cut -f1,2 | sort -k1,2 | tr '\\t' '_' | tr '\\n' ',' | sed 's/,*$//g')\n echo \"[USE ] $REPNAMES\"\n echo \"--track=$REPNAMES \\\\\" >> ${SEGWAY_BIN}/segtrain${EXPERIMENT}.sh\n done\n \n else\n while IFS=$'\\t' read -r -a DATA; do\n \t REPNAMES=${DATA[0]}\"_\"${DATA[1]}\n if [[ \"${DATA[1]}\" == \"$TRAIN_EXPERIMENT\" ]]; then\n echo \"[USE ] $REPNAMES\"\n echo \"--track=$REPNAMES \\\\\" >> ${SEGWAY_BIN}/segtrain${EXPERIMENT}.sh\n \t else\n \t echo \"[SKIP] $REPNAMES\"\n fi\n done < $EXPERIMENTS\n fi\n ## add dinucleotide\n# echo \"--track=dinucleotide \\\\\" >> ${SEGWAY_BIN}/segtrain${EXPERIMENT}.sh\n echo \"train ${SEGWAY_DATA}${EXPERIMENT}.genomedata ${SEGWAY_TRAIN}\" >> ${SEGWAY_BIN}/segtrain${EXPERIMENT}.sh\n\n chmod 777 ${SEGWAY_BIN}/segtrain${EXPERIMENT}.sh\n #echo \"qsub -l mem_requested=16G -V -cwd -b y -j y -o ${SEGWAY_QOUT}SgTrn0${EXPERIMENT}.out -N SgTrn-${EXPERIMENT} 
${SEGWAY_BIN}//segtrain${EXPERIMENT}.sh\" >> ${SEGWAY_BIN}/4_train.sh\n    echo \"${SEGWAY_BIN}/segtrain${EXPERIMENT}.sh\" >> ${SEGWAY_BIN}/4_train.sh\n    # make sure there is no double // in any path as segway doesn't like that\n    sed -i 's|//*|/|g' ${SEGWAY_BIN}/segtrain${EXPERIMENT}.sh\n    \n    chmod 777 ${SEGWAY_BIN}/4_train.sh\n\n    if [ $ARMED = \"TRUE\" ]; then\n        ${SEGWAY_BIN}/4_train.sh\n    fi\nfi\n\n##\n## predict using a trained Segway model\n##\nif [ -n \"$DO_PREDICTSEGWAY\" ]; then\n    echo \"module load fabbus/segway_gbr\" > ${SEGWAY_BIN}/5_predict.sh\n    echo \"[ -f ${SEGWAY_QOUT}SgPrd-${EXPERIMENT}.out ] && rm ${SEGWAY_QOUT}SgPrd-${EXPERIMENT}.out\" >> ${SEGWAY_BIN}/5_predict.sh\n    echo \"[ -d ${SEGWAY_PREDICT} ] && rm -r ${SEGWAY_PREDICT}\" >> ${SEGWAY_BIN}/5_predict.sh\n\n    echo 'echo job_id $JOB_ID startdata $(date)' > ${SEGWAY_BIN}/segpredict${EXPERIMENT}${PREDICTION}.sh\n\n    echo \"echo '*** predict segmentation'\" >> ${SEGWAY_BIN}/segpredict${EXPERIMENT}${PREDICTION}.sh\n    if [ -n \"$EXCLUDABLE\" ]; then\n        EXLCUDECOORDS=\"--exclude-coords=$EXCLUDABLE\"\n    fi\n    echo \"segway $MODEL_ADDPARAM --num-labels=${LABELS} $CLUSTEROPT $EXLCUDECOORDS ${CLOBBER} \\\\\">> ${SEGWAY_BIN}/segpredict${EXPERIMENT}${PREDICTION}.sh\n    # add the --track <ID> sections\n    if [[ -n \"$USE_ALL_TRACK_DATA\" ]]; then\n        for e in $(cut -f 1 $EXPERIMENTS | sort -u); do\n            REPNAMES=$(fgrep -w \"$e\" $EXPERIMENTS | cut -f1,2 | sort -k1,2 | tr '\\t' '_' | tr '\\n' ',' | sed 's/,*$//g')\n            echo \"[USE ] $REPNAMES\"\n            echo \"--track=$REPNAMES \\\\\" >> ${SEGWAY_BIN}/segpredict${EXPERIMENT}${PREDICTION}.sh\n        done\n\n    else\n        while IFS=$'\\t' read -r -a DATA; do\n\tREPNAMES=${DATA[0]}\"_\"${DATA[1]}\n        if [[ \"${DATA[1]}\" == \"$PREDICTION\" ]]; then\n            echo \"[USE ] $REPNAMES\"\n            echo \"--track=$REPNAMES \\\\\" >> ${SEGWAY_BIN}/segpredict${EXPERIMENT}${PREDICTION}.sh\n        \telse\n        \t    echo \"[SKIP] $REPNAMES\"\n\tfi\n        done < $EXPERIMENTS\n    fi\n    ## add dinucleotide\n#    echo \"--track=dinucleotide \\\\\" >> ${SEGWAY_BIN}/segpredict${EXPERIMENT}${PREDICTION}.sh\n    echo \"identify ${SEGWAY_DATA}${EXPERIMENT}.genomedata ${SEGWAY_TRAIN} ${SEGWAY_PREDICT}\" >> ${SEGWAY_BIN}/segpredict${EXPERIMENT}${PREDICTION}.sh\n    echo 'echo job_id $JOB_ID ending $(date)' >> ${SEGWAY_BIN}/segpredict${EXPERIMENT}${PREDICTION}.sh\n    chmod 777 ${SEGWAY_BIN}/segpredict${EXPERIMENT}${PREDICTION}.sh\n    # submit\n#    echo \"qsub -l mem_requested=16G -V -cwd -b y -j y -o ${SEGWAY_QOUT}SgPrd-${EXPERIMENT}.out -N SgPrd-${EXPERIMENT} ${SEGWAY_BIN}/segpredict${EXPERIMENT}${PREDICTION}.sh\" \n    echo \"${SEGWAY_BIN}/segpredict${EXPERIMENT}${PREDICTION}.sh\" >> ${SEGWAY_BIN}/5_predict.sh\n    # make sure there is no double // in any path as segway doesn't like that\n    sed -i 's|//*|/|g' ${SEGWAY_BIN}/segpredict${EXPERIMENT}${PREDICTION}.sh\n\n    chmod 777 ${SEGWAY_BIN}/5_predict.sh\n    \n    if [ $ARMED = \"TRUE\" ]; then\n        ${SEGWAY_BIN}/5_predict.sh\n    fi\nfi\n\n##\n## Evaluate prediction\n##\nif [ -n \"$DO_EVALUATE\" ]; then\n\n    echo \"#!/bin/bash -e\" > ${SEGWAY_BIN}/6_evaluate.sh\n    echo \"unset module\" >> ${SEGWAY_BIN}/6_evaluate.sh\n    echo \"module load fabbus/segway_gbr gi/ucsc_utils/283 gi/bedtools fabbus/R\" >> ${SEGWAY_BIN}/6_evaluate.sh\n\n    for rf in ${SEGWAY_PREDICT}/segway.[0-9].bed.gz; do \n        COUNTER=$(echo $rf | sed 's/.*segway.\\([0-9]\\).bed.gz/\\1/g')\n        echo $COUNTER\n\n        echo \"#!/bin/bash\" > ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\n        echo \"unset module\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\n        echo 'echo job_id $JOB_ID startdata $(date)' >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\n        #preprocess 
file\n if [ -n $OVERWRITEALL ] || [ ! -f ${SEGWAY_PREDICT}/segway.$COUNTER.bed.gz.pkl.gz ]; then\n echo \"echo '*** preprocess'\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh \n echo \"segtools-preprocess ${CLOBBER} ${SEGWAY_PREDICT}/segway.$COUNTER.bed.gz\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh \n fi\n \n echo \"echo '*** length disttribution analysis'\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\n echo \"segtools-length-distribution --outdir=${SEGWAY_RESULT}/length-dist$COUNTER/ ${CLOBBER} ${SEGWAY_PREDICT}/segway.$COUNTER.bed.gz.pkl.gz\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh \n\n echo \"echo '*** signal distribution analysis'\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\n echo \"segtools-signal-distribution --noplot --quiet --outdir=${SEGWAY_RESULT}/signal-dist$COUNTER/ ${CLOBBER} ${SEGWAY_PREDICT}/segway.$COUNTER.bed.gz.pkl.gz ${SEGWAY_DATA}${EXPERIMENT}.genomedata\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\n \n echo \"echo '*** nucleotide frequency analysis'\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\n echo \"segtools-nucleotide-frequency --outdir=${SEGWAY_RESULT}/nucleotide_freg$COUNTER/ ${CLOBBER} ${SEGWAY_PREDICT}/segway.$COUNTER.bed.gz.pkl.gz ${SEGWAY_DATA}${EXPERIMENT}.genomedata\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh \n\n echo \"echo '*** transition analysis'\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\n echo \"segtools-transition --outdir=${SEGWAY_RESULT}/transition$COUNTER/ ${CLOBBER} ${SEGWAY_PREDICT}/segway.$COUNTER.bed.gz.pkl.gz\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh \n\n echo \"echo '*** gene aggregation analysis'\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\n echo \"segtools-aggregation --normalize --mode=gene --outdir=${SEGWAY_RESULT}/gencode-agg$COUNTER/ ${CLOBBER} ${SEGWAY_PREDICT}/segway.$COUNTER.bed.gz.pkl.gz ${ANNOTATION}\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh \n \n echo \"echo '*** gmtk parameter generation'\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\n echo \"segtools-gmtk-parameters --noplot --quiet --outdir=${SEGWAY_RESULT}/gtmk-param$COUNTER/ ${CLOBBER} ${SEGWAY_TRAIN}/params/params.params\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh \n \n echo \"echo '*** html report generation'\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\n echo \"cd ${SEGWAY_RESULT}/\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\n echo \"segtools-html-report -L ${SEGWAY_PREDICT}/segway.$COUNTER.layered.bed.gz --results-dir=${SEGWAY_RESULT}/ -o segtools$COUNTER.html ${SEGWAY_PREDICT}/segway.$COUNTER.bed.gz.pkl.gz ${CLOBBER}\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh \n echo \"sed 's|${SEGWAY_RESULT}/||g' segtools$COUNTER.html > segtools$COUNTER.html2\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh \n echo \"mv segtools$COUNTER.html2 segtools$COUNTER.html\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh \n echo \"cd $(pwd)\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\n echo \"for LABEL in \\$(seq 0 $(( $LABELS - 1 )) ); do zcat ${SEGWAY_PREDICT}/segway.$COUNTER.bed.gz | tail -n+2 | awk -v label=\\$LABEL '{if (\\$4 == label){OFS=\\\"\\t\\\"; print \\$1,\\$2,\\$3,\\$4}}' | bedtools sort > ${SEGWAY_PREDICT}/segway.$COUNTER.\\$LABEL.bed\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh \n echo \"bedToBigBed -type=bed4 ${SEGWAY_PREDICT}/segway.$COUNTER.\\$LABEL.bed $CHROMSIZES ${SEGWAY_PREDICT}/$EXPERIMENT.$COUNTER.\\$LABEL.bb\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh \n echo \"rm ${SEGWAY_PREDICT}/segway.$COUNTER.\\$LABEL.bed\" >> 
${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh \n        echo \"done\" >> ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh \n        # make sure there is no double // in any path as segway doesn't like that\n        sed -i 's|//*|/|g' ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\n        \n        chmod 777 ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\n        \n        echo \"qsub -l mem_requested=16G -S /bin/bash -V -cwd -b y -j y -o ${SEGWAY_QOUT}SgEva-${EXPERIMENT}$COUNTER.out -N SgEva-${EXPERIMENT}$COUNTER ${SEGWAY_BIN}/segeval${EXPERIMENT}$COUNTER.sh\" >> ${SEGWAY_BIN}/6_evaluate.sh\n        \n    done\n\n    chmod 777 ${SEGWAY_BIN}/6_evaluate.sh\n    if [ $ARMED = \"TRUE\" ]; then\n        ${SEGWAY_BIN}/6_evaluate.sh\n    fi\nfi\n\n\n" }, { "alpha_fraction": 0.5928449630737305, "alphanum_fraction": 0.6153321862220764, "avg_line_length": 23.057376861572266, "blob_id": "016bf4ab263e2d2294c4bafdec64b82e13e14c85", "content_id": "5da53d4a2b6471a01385f79c83e62f67eb36329c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2935, "license_type": "no_license", "max_line_length": 179, "num_lines": 122, "path": "/HiC/run_HiC.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\nUSAGEMSG=\"usage: $(basename $0) CONFIGFILE\n\nStarts the HiC pipeline given the config file\n\nAuthor: Fabian Buske\n\nRequirements (modules):\n\tmodule fabbus/segway/1.1.0 \n\tBEDtools (genomeCoverageBed, fetchChromSizes) in path\n\n* CONFIGFILE - the config file describing the job\n* -1 step 1 - collect the bam data from gagri\n* -2 step 2 - run FastQC on data\n* -3 step 3 - map data using bowtie\n* -4 step 4 - \n* -5 step 5 - \n* -6 step 6 - \n* -f force - overwrite existing results\n* -a armed - trigger the jobs after writing the scripts\n* -v - print progress information (verbose).\n\"\n\n\nPIPELINEDIR=`dirname $0`\nVERSION=\"0.0.1\"\n\nDO_TRANSFERDATA=\"\"\nDO_FASTQC=\"\"\nDO_MAPPING=\"\"\nDO_TRAINSEGWAY=\"\"\nDO_PREDICTSEGWAY=\"\"\nDO_EVALUATE=\"\"\nCLOBBER=\"\"\n\nARMED=\"FALSE\"\nOVERWRITEALL=\"FALSE\"\n\n[ $# -lt 1 ] && echo \"$USAGEMSG\" >&2 && exit 1\n\nwhile getopts \"123456afv\" opt;\ndo\n    case ${opt} in\n\t1) DO_TRANSFERDATA=\"TRUE\";;\n\t2) DO_FASTQC=\"TRUE\";;\n\t3) DO_MAPPING=\"TRUE\";;\n\t4) DO_TRAINSEGWAY=\"TRUE\";;\n\t5) DO_PREDICTSEGWAY=\"TRUE\";;\n\t6) DO_EVALUATE=\"TRUE\";;\n        a) ARMED=\"TRUE\";;\n        f) OVERWRITEALL=\"TRUE\";;\n        v) VERBOSE=\"--verbose\";;\n        \\?) 
print >&2 \"$0: error - unrecognized option $1\"\n exit 1;;\n esac\ndone\n\nshift $(($OPTIND-1))\nCONFIG=$1\n\nsource ${CONFIG}\n\nif [ \"$OVERWRITEALL\" = \"TRUE\" ];then\n\tCLOBBER=\"--clobber \"\nfi \n\nif [ -d ${TARGETDIR} ]; then\n\techo \"[WARN] target directory exists already\"\nelse\n\tmkdir -p ${TARGETDIR}\nfi\n\n#\n# data folder (to put or source the data from)\n# Important: add final /\n#\nHIC_DATA=${TARGETDIR}data/\nHIC_BIN=${TARGETDIR}bin/\nHIC_QOUT=${TARGETDIR}qout/\nHIC_RESULT=${TARGETDIR}result/\n\n# some housekeeping\nmkdir -p $HIC_DATA $HIC_BIN $HIC_QOUT $HIC_RESULT\n\n##\n## collect the data (tracks) from gagri\n##\nif [ -n \"$DO_TRANSFERDATA\" ]; then\n\t# get all files\n\techo \"echo 'collect data tracks from gagri'\" > ${HIC_BIN}1_cdata.sh\n\tFILEARR=$(echo ${FASTQ} | tr \" ,\" \"\\n\")\n\tfor F in ${FILEARR}; do\n\t\tFN=${F##*/}\n FB=${FN%.*}\n\t echo \"echo 'datafile ${F}'\" >> ${HIC_BIN}1_cdata.sh\n \techo \"smbclient \\\\\\\\\\\\\\\\gagri\\\\\\\\GRIW -A ~/.smbclient -c 'prompt; recurse; cd ${RAW_FILES_SOURCE}${FB}; mget ${F}*.gz' && mv ${F}*.gz ${HIC_DATA}\" >> ${HIC_BIN}1_cdata.sh\n\tdone\n\n\tchmod 777 ${HIC_BIN}1_cdata.sh\n\tif [ $ARMED = \"TRUE\" ]; then\n\t ${HIC_BIN}1_cdata.sh\n\tfi\nfi\n\n## \n## transform the bam data into bedgraph\n##\nif [ -n \"$DO_FASTQC\" ]; then\n\techo \"echo 'run fastqc'\" > ${HIC_BIN}fastqc.sh\n\techo \"mkdir -p ${HIC_RESULT}fastqc\" >> ${HIC_BIN}fastqc.sh\n\techo \"fastqc -f fastq -t 6 -o ${HIC_RESULT}fastqc ${HIC_DATA}*.gz \" >> ${HIC_BIN}fastqc.sh\n\n\tchmod 777 ${HIC_BIN}fastqc.sh\t\n\techo \"qsub -V -cwd -l h_rt=01:00:00 -pe smp 6 -j y -m e -M `whoami`@garvan.unsw.edu.au -S /bin/bash -o ${HIC_QOUT}FastQC.out -N FASTQC ${HIC_BIN}fastqc.sh\" > ${HIC_BIN}2_fastqc.sh\n\tchmod 777 ${HIC_BIN}2_fastqc.sh\n\n if [ $ARMED = \"TRUE\" ]; then\n\t\t${HIC_BIN}2_fastqc.sh\n fi\n\nfi\n" }, { "alpha_fraction": 0.5691881775856018, "alphanum_fraction": 0.5784133076667786, "avg_line_length": 31.81818199157715, "blob_id": "24a0c35c3f44806459ab3a8fdfc046f3542384f7", "content_id": "2ec5a20e196f07eef3d5524290a2d4fd01d577de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1084, "license_type": "no_license", "max_line_length": 163, "num_lines": 33, "path": "/segway/bak/2_transform_data.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nDIR=`dirname $0`\nsource ${DIR}/0_config.sh\n\n##\n## transform the bam data into bedgraph\n##\n\nif [ -n $DO_CONVERTBAM2BEDGRAPH ]; then\n\n echo \"get chromosome sizes for ${GENOME}\"\n [ ! -f ${GENOME}.chrom.sizes ] && fetchChromSizes ${GENOME} > ${SEGWAY_DATA}/${GENOME}.chrom.sizes\n\n\tfor F in `ls ${SEGWAY_DATA}/*.bam`; do\n\n\t\tFN=${F##*/}\n\t\tFB=${FN%.*}\n\n\t\tif [ ! 
-f ${SEGWAY_DATA}/${FB}.bedgraph.gz ]; then\n\t\t\t[ -f ${SEGWAY_QOUT}/td4${FB}.out ] && rm ${SEGWAY_QOUT}/td4${FB}.out\n\n \t        echo 'echo job_id $JOB_ID startdata $(date)' > ${SEGWAY_BIN}/tdata${FB}.sh\n\t        echo \"echo convert ${FB}.bam to bedGraph\" >> ${SEGWAY_BIN}/tdata${FB}.sh\n\t\t\techo \"genomeCoverageBed -split -bg -ibam ${F} -g ${SEGWAY_DATA}/${GENOME}.chrom.sizes | gzip > ${SEGWAY_DATA}/${FB}.bedgraph.gz\" >> ${SEGWAY_BIN}/tdata${FB}.sh\t\n\t\n\t\t\techo 'echo job_id $JOB_ID ending $(date)' >> ${SEGWAY_BIN}/tdata${FB}.sh\n\t\t\tchmod 777 ${SEGWAY_BIN}/tdata${FB}.sh\n\t\t\t# submit\n\t\t\tqsub -V -cwd -b y -j y -o ${SEGWAY_QOUT}/td4${FB}.out -N td4${FB} ${SEGWAY_BIN}/tdata${FB}.sh\n\t\tfi\n\tdone\nfi\n\n" }, { "alpha_fraction": 0.5576922893524170, "alphanum_fraction": 0.5615384578704834, "avg_line_length": 23.761905670166016, "blob_id": "65635ab224abb0eab0f8d763bd0254b09e762bca", "content_id": "2e97ff73273179b8cdb9b128c606a9e7898026d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 520, "license_type": "no_license", "max_line_length": 149, "num_lines": 21, "path": "/segway/bak/1_transfer_data.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\nDIR=`dirname $0`\nsource ${DIR}/0_config.sh\n\n\n##\n## transfer the data (tracks) from gagri\n##\n\nif [ -n \"$DO_TRANSFERDATA\" ]; then\n\n\t# get all files\n\tfor F in ${FILES}; do\n\t\techo \"get datafile ${F} from gagri\"\n\t\t[ ! -f ${F}.bam ] && smbclient \\\\\\\\gagri\\\\GRIW -A ~/.smbclient -c \"cd ${FILES_SOURCE}/${F}; get ${F}.bam\" && mv ${F}.bam ${SEGWAY_DATA}\n\t\t[ ! -f ${F}.bam.bai ] && smbclient \\\\\\\\gagri\\\\GRIW -A ~/.smbclient -c \"cd ${FILES_SOURCE}/${F}; get ${F}.bam.bai\" && mv ${F}.bam.bai ${SEGWAY_DATA}\n\n\tdone\n\nfi\n" }, { "alpha_fraction": 0.6676543354988098, "alphanum_fraction": 0.6745678782463074, "avg_line_length": 47.19047546386719, "blob_id": "21b199a50fa2cae5ec2f04062aadbbc4a42d659b", "content_id": "09679f625dcbfd933678658b9fc298223c46c7f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2025, "license_type": "no_license", "max_line_length": 207, "num_lines": 42, "path": "/segway/bak/6_model_evaluation.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\nDIR=`dirname $0`\nsource ${DIR}/0_config.sh\n\n## load module\n\nmodule load fabbus/segway/1.1.0\n\n##\n## Evaluate\n##\n\nif [ -n \"$DO_EVALUATE\" ]; then\n\n\techo 'echo job_id $JOB_ID startdata $(date)' > ${SEGWAY_BIN}segeval${EXPERIMENT}.sh \n\t#preprocess file\n\tif [ -n \"$OVERWRITEALL\" ] || [ ! 
-f ${SEGWAY_PREDICT}segway.bed.gz.pkl.gz ]; then\n\t\techo \"echo '----------preprocess'\" >> ${SEGWAY_BIN}segeval${EXPERIMENT}.sh \n\t\techo \"segtools-preprocess ${SEGWAY_PREDICT}segway.bed.gz\" >> ${SEGWAY_BIN}segeval${EXPERIMENT}.sh \n\tfi\n\t\n\techo \"echo '----------lengthdist'\" >> ${SEGWAY_BIN}segeval${EXPERIMENT}.sh\n\techo \"segtools-length-distribution ${SEGWAY_PREDICT}segway.bed.gz.pkl.gz --outdir=${SEGWAY_RESULT}length-dist/ --clobber\" >> ${SEGWAY_BIN}segeval${EXPERIMENT}.sh \n\t\n\techo \"echo '----------geneagg'\" >> ${SEGWAY_BIN}segeval${EXPERIMENT}.sh\n\techo \"segtools-aggregation ${SEGWAY_PREDICT}segway.bed.gz.pkl.gz ${SEGWAY_DATA}${ANNOTATION} --normalize --mode=gene --outdir=${SEGWAY_RESULT}gencode-agg/ --clobber\" >> ${SEGWAY_BIN}segeval${EXPERIMENT}.sh \n\t\n\techo \"echo '----------gmtkparam'\" >> ${SEGWAY_BIN}segeval${EXPERIMENT}.sh\n\techo \"segtools-gmtk-parameters ${SEGWAY_TRAIN}params/params.params --outdir=${SEGWAY_RESULT}gtmk-param/ --clobber\" >> ${SEGWAY_BIN}segeval${EXPERIMENT}.sh \n\t\n\techo \"echo '----------html'\" >> ${SEGWAY_BIN}segeval${EXPERIMENT}.sh\n\techo \"cd ${SEGWAY_RESULT}\" >> ${SEGWAY_BIN}segeval${EXPERIMENT}.sh\n\techo \"segtools-html-report -o segtools.html ${SEGWAY_PREDICT}segway.bed.gz.pkl.gz --clobber\" >> ${SEGWAY_BIN}segeval${EXPERIMENT}.sh \n\techo \"sed 's|${SEGWAY_RESULT}||g' segtools.html > segtools.html2\" >> ${SEGWAY_BIN}segeval${EXPERIMENT}.sh \n\techo \"mv segtools.html2 segtools.html\" >> ${SEGWAY_BIN}segeval${EXPERIMENT}.sh \n\techo \"cd $(pwd)\" >> ${SEGWAY_BIN}segeval${EXPERIMENT}.sh \n\n\tchmod 777 ${SEGWAY_BIN}segeval${EXPERIMENT}.sh\n\n\techo \"qsub -l mem_requested=16G -V -cwd -b y -j y -o ${SEGWAY_QOUT}sgevl4M${EXPERIMENT}.out -N sgevl4M${EXPERIMENT} ${SEGWAY_BIN}segeval${EXPERIMENT}.sh\"\nfi\n\n" }, { "alpha_fraction": 0.6406025886535645, "alphanum_fraction": 0.6499282717704773, "avg_line_length": 36.621620178222656, "blob_id": "1e0d2bb0613d0223d7eac6736e6b492f79086335", "content_id": "3676e9223447393391a81c45a84e188d382f1684", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1394, "license_type": "no_license", "max_line_length": 152, "num_lines": 37, "path": "/segway/bak/3_create_genomedata.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\nDIR=`dirname $0`\nsource ${DIR}/0_config.sh\n\n## load module\n\nmodule load fabbus/segway/1.1.0\n\n##\n## generate the separate genome archives for the tissues.\n## The archieve is then annotated with the bedgraph data \n##\n\nif [ -n \"$DO_GENERATEARCHIVE\" ]; then\n\t[ -f ${SEGWAY_QOUT}/GnDt4M${EXPERIMENT}.out ] && rm ${SEGWAY_QOUT}/GnDt4M${EXPERIMENT}.out\n\n\techo 'echo job_id $JOB_ID startdata $(date)' > ${SEGWAY_BIN}/gdata${EXPERIMENT}.sh\n\techo 'echo get chromosome sizes for ${GENOME}' >> ${SEGWAY_BIN}/gdata${EXPERIMENT}.sh\n [ ! 
-f ${GENOME}.chrom.sizes ] && echo \"fetchChromSizes ${GENOME} > ${SEGWAY_DATA}/${GENOME}.chrom.sizes\" >> ${SEGWAY_BIN}/gdata${EXPERIMENT}.sh\n\n\t# genomedata-load call\n\techo \"genomedata-load --sizes -s ${SEGWAY_DATA}/${GENOME}.chrom.sizes \\\\\" >> ${SEGWAY_BIN}/gdata${EXPERIMENT}.sh\n\t# add the -t <ID>=<FILE> sections for all tracks\n\n\tfor f in $(ls $SEGWAY_DATA/*.gz ); do\n\t b=$(basename $f)\n\t\tarrIN=(${b//./ })\n\t\techo \"-t \"${arrIN[0]}=$f\" \\\\\" >> ${SEGWAY_BIN}/gdata${EXPERIMENT}.sh\n\tdone\n\techo \"${SEGWAY_DATA}/${EXPERIMENT}.genomedata\" >> ${SEGWAY_BIN}/gdata${EXPERIMENT}.sh\n\techo 'echo job_id $JOB_ID ending $(date)' >> ${SEGWAY_BIN}/gdata${EXPERIMENT}.sh\n\tchmod 777 ${SEGWAY_BIN}/gdata${EXPERIMENT}.sh\n\t#submit\n\tqsub -V -cwd -b y -j y -o ${SEGWAY_QOUT}/GnDt4M${EXPERIMENT}.out -N GnDt4M${EXPERIMENT} ${SEGWAY_BIN}/gdata${EXPERIMENT}.sh\n\nfi\n\n\n" }, { "alpha_fraction": 0.6152330636978149, "alphanum_fraction": 0.6250820755958557, "avg_line_length": 39.078948974609375, "blob_id": "c7488d98ed7e113a9189295ae33f959ce88d17ba", "content_id": "740dc17943c03c5280a0d64d9cd03128b580a4e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1523, "license_type": "no_license", "max_line_length": 164, "num_lines": 38, "path": "/segway/bak/5_model_predict.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\nDIR=`dirname $0`\nsource ${DIR}/0_config.sh\n\n## load module\n\nmodule load fabbus/segway/1.1.0\n\n##\n## predict usign a trained Seqway model\n##\n\nif [ -n \"$DO_PREDICTSEGWAY\" ]; then\n\t[ -f ${SEGWAY_QOUT}/sgprd4M${EXPERIMENT}.out ] && rm ${SEGWAY_QOUT}sgprd4M${EXPERIMENT}.out\n\t[ -d ${SEGWAY_PREDICT} ] && rm -r ${SEGWAY_PREDICT}\n\n\techo 'echo job_id $JOB_ID startdata $(date)' > ${SEGWAY_BIN}segpredict${EXPERIMENT}.sh \n\n\techo 'export TMP=/tmp/' >> ${SEGWAY_BIN}segpredict${EXPERIMENT}.sh\n\techo 'export TEMP=/tmp/' >> ${SEGWAY_BIN}segpredict${EXPERIMENT}.sh\n\techo 'export TMPDIR=/tmp/' >> ${SEGWAY_BIN}segpredict${EXPERIMENT}.sh\n \n\t# segway call \n\techo \"segway --num-labels=${LABELS} \\\\\">> ${SEGWAY_BIN}segpredict${EXPERIMENT}.sh\n # add the --track <ID> sections\n for f in $(ls ${SEGWAY_DATA}/*.bedgraph.gz ); do\n b=$(basename $f)\n arrIN=(${b//./ })\n echo \"--track \"${arrIN[0]}\" \\\\\" >> ${SEGWAY_BIN}segpredict${EXPERIMENT}.sh\n done\n echo \"identify ${SEGWAY_DATA}${EXPERIMENT}.genomedata ${SEGWAY_TRAIN} ${SEGWAY_PREDICT}\" >> ${SEGWAY_BIN}segpredict${EXPERIMENT}.sh\n echo 'echo job_id $JOB_ID ending $(date)' >> ${SEGWAY_BIN}segpredict${EXPERIMENT}.sh\n chmod 777 ${SEGWAY_BIN}segpredict${EXPERIMENT}.sh\n # submit\n# echo \"qsub -l mem_requested=16G -V -cwd -b y -j y -o ${SEGWAY_QOUT}sgprd4M${EXPERIMENT}.out -N sgprd4M${EXPERIMENT} ${SEGWAY_BIN}segpredict${EXPERIMENT}.sh\"\n\t${SEGWAY_BIN}segpredict${EXPERIMENT}.sh\nfi\n" }, { "alpha_fraction": 0.6271998286247253, "alphanum_fraction": 0.6438612937927246, "avg_line_length": 34.56666564941406, "blob_id": "c3e7383f336f148e765bbd007dffbd995f788419", "content_id": "8640dc5644db793ab4f8524826c9bc6fca3c80bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9603, "license_type": "no_license", "max_line_length": 154, "num_lines": 270, "path": "/DimensionalityReductionFromSparseMatrix/TruncatedSVDplot.py", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/python\n######################################\n# calculate using sklearn 
TruncatedSVD\n#\n# Author: Fabian Buske (13/01/2015)\n######################################\n\nfrom scipy.sparse import lil_matrix\nfrom scipy.sparse import csr_matrix\nimport numpy\nimport os, sys, errno, re\nimport argparse\nfrom sklearn.decomposition import TruncatedSVD\nimport fileinput\nimport datetime\nimport gzip\n\n######################################\n# Timestamp\n######################################\ndef timeStamp():\n    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n\ndef createFragmentResolution():\n\t''' \n\tcreates one lookup table for quick lookups\n\treturns \n\t    fragmentsLookupTable[tuple(chrom, fragmentMidPoint)] = fragmentId\n\t'''\n\n\tif (args.verbose):\n\t\tprint >> sys.stdout, \"- %s START : populate lookup table with given resolution for chromosomes matching pattern %s\" % (timeStamp(), args.chromPattern)\n\n\tfragmentsCount = 0\n\tfragmentsLookupTable = {}\n\t \n\tfor line in fileinput.input([args.chromSizes]):\n\t\tchrom=line.split(\"\\t\")[0]\n\t\t# check if chromosome needs to be filtered out or not\n\t\tif (args.chromPattern != \"\" and not re.match(args.chromPattern, chrom)):\n\t\t\t# skip this one\n\t\t\tif (args.veryverbose):\n\t\t\t\tprint \"skipping pattern %s\" % (line)\n\t\t\tcontinue\n\t\tchromlen=int(line.split(\"\\t\")[1])\n\n\t\tfor i in range(0, chromlen, args.resolution):\n\t\t\tstart=i\n\t\t\tend=min(i+ args.resolution, chromlen)\n\t\t\tfragmentsLookupTable[tuple([chrom, int(0.5*(start+end))])] = fragmentsCount\n\t\t\tfragmentsCount += 1\n\t\n\tif (args.verbose):\n\t\tprint >> sys.stdout, \"- %s FINISH : counted %d fragments\" % (timeStamp(), fragmentsCount)\n\n\treturn [ fragmentsLookupTable, fragmentsCount ]\n\ndef readFileIntoSparseMatrix(fragmentsLookupTable, fragmentsCount):\n\n\tA = lil_matrix((len(args.contactsCountMatrices), fragmentsCount * fragmentsCount), dtype='i')\n\tc = 0 \n\tS = []\n\tfor contactCountsFile in args.contactsCountMatrices:\n\n\t\tS += [os.path.basename(contactCountsFile).split(\".\")[0]]\n\n\t\tif (args.verbose):\n\t\t\tprint >> sys.stdout, \"- %s START : reading file %s\" % (timeStamp(), contactCountsFile)\n\n\t\tif contactCountsFile.endswith('.gz'):\n\t\t\tinfile=gzip.open(contactCountsFile,'r')\n\t\telse:\n\t\t\tinfile=open(contactCountsFile,'r')\n\n\n\t\tfor line in infile:\n\t\t\tl = line.split()\n\t\t\tch1,mid1,ch2,mid2,metric,qvalue = [l[0],int(l[1]),l[2],int(l[3]),float(l[4]),float(l[5])] # metric and q-value come from columns 4 and 5 of the line, not the literals 4 and 5\n\t\t\t\n\t\t\ttry:\n\t\t\t\tif (float(qvalue) > args.threshold):\n\t\t\t\t\tcontinue\n\t\t\texcept:\n\t\t\t\t# probably the header line\n\t\t\t\tcontinue\n\n\t\t\t# skip irrelevant entries\n\t\t\tif (metric == 0):\n\t\t\t\tcontinue\n\n\t\t\t# skip trans counts if requested\n\t\t\tif (args.cis and ch1 != ch2):\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tfragment1 = tuple([ch1,int(mid1)])\n\t\t\tfragment2 = tuple([ch2,int(mid2)])\n\n\n\t\t\tif (not fragmentsLookupTable.has_key(fragment1) or not fragmentsLookupTable.has_key(fragment2)):\n\t\t\t\tif (args.veryverbose):\n\t\t\t\t\tprint \"[NOTE] fragment %s or %s not detected in map, is the correct resolution specified?\" % (ch1,ch2)\n\t\t\t\tcontinue\n\n\t\t\t# keep this symmetric matrix as sparse as possible by just filling in the top triangle\n\t\t\tif (fragment1 < fragment2):\n\t\t\t\tA[c, fragmentsLookupTable[fragment1] * fragmentsCount + fragmentsLookupTable[fragment2]] = metric\n\t\t\telse:\n\t\t\t\tA[c, fragmentsLookupTable[fragment2] * fragmentsCount + fragmentsLookupTable[fragment1]] = metric\n\n\t\tc += 1 \n\n\t\tif (args.verbose):\n\t\t\tprint >> 
sys.stdout, \"- %s FINISH : reading file\" % (timeStamp())\n\n\treturn (S,A.tocsr())\n\ndef explainVariance(M):\n\n\tif (args.verbose):\n\t\tprint >> sys.stdout, \"- %s START : calculating SVD\" % (timeStamp())\n\n\tsvd = TruncatedSVD(n_components=3, random_state=42)\n\tsvd.fit(M)\n\tN = svd.transform(M)\n\tif (args.verbose):\n\t\tprint >> sys.stdout, \"- %s FINISH : calculating SVD\" % (timeStamp())\n\n\treturn (N,svd.explained_variance_ratio_,svd.explained_variance_ratio_.sum())\n\n\ndef plotVariance(S, N, explainedRatio, totalExplained):\n\n\tx = [\"%.2f\" % number for number in numpy.transpose(N)[0]]\n\ty = [\"%.2f\" % number for number in numpy.transpose(N)[1]]\n\tz = [\"%.2f\" % number for number in numpy.transpose(N)[2]]\n\tl = [args.labels.split(\",\")]\n\n\tjo = open(\"+args.outdir+'/'+args.prefix+\".json',\"w\")\n\n\tjo.write(\"\"\"{\n\t\texplained : {\n\t\t\tx : %.2f,\n\t\t\ty : %.2f,\n\t\t\tz : %.2f\n\t\t},\n\t\texperiments : { \"\"\" % (explainedRatio[0]*100, explainedRatio[1]*100, explainedRatio[2]*100))\n\t\n\tarr = []\n\tfor i in range(length(l)):\n\t\tarr += [('\"prefix\" : \"%s\", x: %.2f, y: %.2f,z: %.2f ' % (l[i],x[i],y[i],z[i]))] \n\tjo.write(\",\\n\".join(arr))\n\tjo.write(\"\"\"\n\t\t}\n}\t\n\t\"\"\")\n\ndef plotVariance(S, N, explainedRatio, totalExplained):\n\n\tf = open(args.outdir+'/'+args.imagename+\".R\",\"w\")\n\tx = [\"%.2f\" % number for number in numpy.transpose(N)[0]]\n\ty = [\"%.2f\" % number for number in numpy.transpose(N)[1]]\n\tz = [\"%.2f\" % number for number in numpy.transpose(N)[2]]\n\n\tf.write(\"library(ggplot2)\\n\")\n\tif (args.labels != \"\"):\n\t\tf.write(\"data <- data.frame(experiment=c(\\\"%s\\\"), \" % ('\",\"'.join(args.labels.split(\",\"))))\n\telse:\n\t\tf.write(\"data <- data.frame(experiment=c(\\\"%s\\\"), \" % ('\",\"'.join(S)))\n\tf.write(\" x=c(%s), \" % (','.join(x)))\n\tf.write(\" y=c(%s),\" % (','.join(y)))\n\tf.write(\" z=c(%s)\" % (','.join(z)))\n\ts=[]\n\tif (args.groups != \"\"):\n\t\tf.write(\", groups=c('%s')\" % (\"','\".join(args.groups.split(','))))\n\t\ts+=[\"color=groups\"]\n\telse:\n\t\ts+=[\"color=experiment\"]\n\tif (args.cutter != \"\"):\n\t\tf.write(\", cutter=c('%s')\" % (\"','\".join(args.cutter.split(','))))\n\t\ts+=[\"shape=cutter\"]\n\n\tf.write(\")\\n\")\n\tf.write(\"g1<- ggplot(data, aes(x,y))\")\n\tf.write(\" + geom_point(aes(%s), alpha = 0.5, size=5)\" %(','.join(s)))\n\tf.write(\" + xlab('Dim 1 (%.1f%% variance explained)')\" % (explainedRatio[0]*100))\n\tf.write(\" + ylab('Dim 2 (%.1f%% variance explained)')\" % (explainedRatio[1]*100))\n\tf.write(\" + geom_text(aes(label=experiment, color=experiment), size=3, vjust=3)\")\n\tf.write(\" + xlim(c(min(data$x)-0.1*(max(data$x)-min(data$x)),max(data$x)+0.1*(max(data$x)-min(data$x))))\")\n\tf.write(\" + ylim(c(min(data$y)-0.1*(max(data$y)-min(data$y)),max(data$y)+0.1*(max(data$y)-min(data$y))))\")\n\tf.write(\"\\n\")\n\n\tf.write(\"g2<- ggplot(data, aes(y,z))\")\n\tf.write(\" + geom_point(aes(%s), alpha = 0.5, size=5)\" %(','.join(s)))\n\tf.write(\" + xlab('Dim 2 (%.1f%% variance explained)')\" % (explainedRatio[1]*100))\n\tf.write(\" + ylab('Dim 3 (%.1f%% variance explained)')\" % (explainedRatio[2]*100))\n\tf.write(\" + geom_text(aes(label=experiment, color=experiment), size=3, vjust=3)\")\n\tf.write(\" + xlim(c(min(data$y)-0.1*(max(data$y)-min(data$y)),max(data$y)+0.1*(max(data$y)-min(data$y))))\")\n\tf.write(\" + 
ylim(c(min(data$z)-0.1*(max(data$z)-min(data$z)),max(data$z)+0.1*(max(data$z)-min(data$z))))\")\n\tf.write(\"\\n\")\n\n\tf.write(\"pdf('\"+args.outdir+'/'+args.prefix+\".pdf', width=10, height=8)\\n\")\n\tf.write(\"g1\\n\")\n\tf.write(\"g2\\n\")\n\tf.write(\"dev.off()\\n\")\n\n\tf.close()\n\ndef main():\n\t''' main method \n\t parsing the parameters and options\n\t'''\n\tglobal args\n\tparser = argparse.ArgumentParser(description='Reads a list of significant interaction sparse matrix files and performs TruncatedSVD')\n\tparser.add_argument('chromSizes', type=str, help='chromosome sizes')\n\tparser.add_argument('contactsCountMatrices', metavar=\"contactsCountMatrices\", type=str, nargs='+', help='sparse interaction matrices')\n\tparser.add_argument(\"-o\", '--outdir', dest='outdir', type=str, default=\"./\",\n\t\t\t\t\t\thelp='output location')\n\tparser.add_argument(\"-O\", '--prefix', dest='prefix', type=str, default=\"truncatedSVD\",\n\t\t\t\t\t\thelp='prefix of the output files')\n\tparser.add_argument(\"-r\", \"--resolution\", type=int, dest=\"resolution\", default=1000000, \n\t\t\t\t\t\thelp=\"size of a fragment (bin) in bp [default 1000000]\")\n\tparser.add_argument(\"-C\", \"--chrompattern\", type=str, dest=\"chromPattern\", default=\"\", \n\t\t\t\t\t\thelp=\"pattern of chromosomes to filter for [default all]\")\n\tparser.add_argument(\"-t\", \"--threshold\", type=float, dest=\"threshold\", default=0.01, \n\t\t\t\t\t\thelp=\"q-value threshold used to filter data [default 0.01]\")\n\tparser.add_argument(\"--cis\", action=\"store_true\", help=\"consider cis interactions only [default all]\")\n\t\n\tparser.add_argument(\"-g\", \"--groups\", type=str, dest=\"groups\", default=\"\", \n\t\t\t\t\t\thelp=\"group list of count matrices via comma-separated list, e.g. 1,1,3,4\")\n\tparser.add_argument(\"-l\", \"--labels\", type=str, dest=\"labels\", default=\"\", \n\t\t\t\t\t\thelp=\"text labels for experiments used in plots, supplied via comma-separated list, e.g. PrEC_I,PrEC_II,LNCaP_I,LNCaP_II\")\n\tparser.add_argument(\"-c\", \"--cutter\", type=str, dest=\"cutter\", default=\"\", \n\t\t\t\t\t\thelp=\"restriction enzymes used in matrices, supplied via comma-separated list, e.g. 
HindIII,NcoI,HindIII,NcoI\")\n\tparser.add_argument(\"--plot\", action=\"store_true\")\n\tparser.add_argument(\"--verbose\", action=\"store_true\")\n\tparser.add_argument(\"--veryverbose\", action=\"store_true\")\n\n\tparser.add_argument(\"--quiet\", action=\"store_true\")\n\n\targs = parser.parse_args()\n\n\t# try:\n\t# \tos.makedirs(args.outdir)\n\t# except OSError as exc: # Python >2.5\n\t# \tif exc.errno == errno.EEXIST and os.path.isdir(args.outdir):\n\t# \t\tpass\n\t# \telse: raise\n\n\tif (args.resolution < 1):\n\t\tparser.error(\"[ERROR] resolution must be a positive integer, was :\"+str(args.resolution))\n\t\tsys.exit(1)\n\telif (args.chromSizes == \"\" or not os.path.isfile(args.chromSizes)):\n\t\tparser.error(\"[ERROR] chromSizes not given or not existing, was :\"+str(args.chromSizes))\n\t\tsys.exit(1)\n\n\t[ fragmentsLookupTable, fragmentsCount ] = createFragmentResolution()\n\n\n\t(S, A) = readFileIntoSparseMatrix(fragmentsLookupTable, fragmentsCount)\n\n\t(N, explainedRatio, totalExplained) = explainVariance(A)\n\n\toutputJSON(S, N, explainedRatio, totalExplained)\n\n\tif (args.plot):\n\t\tplotVariance(S, N, explainedRatio, totalExplained)\n\nif __name__ == \"__main__\":\n\t\tmain()\n" }, { "alpha_fraction": 0.6244064569473267, "alphanum_fraction": 0.6673789024353027, "avg_line_length": 34.39495849609375, "blob_id": "0590ec00a8c331a47240423f3513cde51f041fdf", "content_id": "e134a6753cb88f02c6611e829314c64572d6e4fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4212, "license_type": "no_license", "max_line_length": 219, "num_lines": 119, "path": "/ExtractRegion/extractRegionFromBam.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\n#$ -l walltime=1:30:00\n#$ -l vmem=8gb \n#$ -l nodes=1:ppn=1\n#$ -j oe\n#$ -N toy\n#$ -o toy.out\n#$ -cwd\n#$ -V\n\nDIR=$(dirname $0)\nVERSION=\"0.0.1\"\n\nUSAGEMSG=\"usage: $(basename $0) -o outputDirectory -f outfilename -p -v BAM LOCATION \n\nGenerate toy example fastq files by taking a mapped file (BAM) and returning the FASTQ data for a specific region of interest\nexamples:\n./extractRegionFromBam.sh -o ~/research/Sandbox_ngd1/fastq/ChIPseq -n ChIPseq_CTCF_chr16 ~/research/integration/TFs/H1esc/bowtie/wgEncodeBroadHistoneH1hescCtcfStdRawDataRep1.asd.bam chr16:27184646-27472388\n./extractRegionFromBam.sh -o ~/research/Sandbox_ngd1/fastq/ChIPseq -n ChIPseq_H3k9me3_chr16 ~/research/integration/TFs/H1esc/bowtie/wgEncodeBroadHistoneH1hescH3k09me3StdRawDataRep1.asd.bam chr16:27184646-27472388\n./extractRegionFromBam.sh -o ~/research/Sandbox_ngd1/fastq/ChIPseq_input -n ChIPseq_Input_chr16 ~/research/integration/TFs/H1esc_control/bowtie/wgEncodeBroadHistoneH1hescControlStdRawData.asd.bam chr16:20000000-50000000\n\nHiC assuming merged bam and properly sets flag\n~/extractRegionFromBam.sh -1 R1 -2 R2 -p -o ~/tmp/ -n GMall_Ncol GMall_Ncol_uniques.bam chr16:20000000-30000000\n\nAuthor: Fabian Buske\nVersion: $VERSION\n\n* BAM - the bam file\n* LOCATION - chr location in the form chrx:start-end\n* 1 - read one identifier\n* 2 - read 2 identifier\n* p - indicates that libary is paired end\n* i - include read pairs where the mate mapped outside the region of interest (otherwise the readpair is removed\n* o dir - where to put the data\n* n file - change filename to this prefix (suffix will be _Rx.fastq.gz, with x in {1,2})\n\"\n\n[ $# -lt 2 ] && echo \"$USAGEMSG\" >&2 && exit 
1\nISPAIRED=\"FALSE\"\nOUTDIR=\nFILENAME=\nLIBRARY=\"SINGLE\"\nOUTSITEMAPPINGMATES=\"EXCLUDE\"\n\nwhile getopts \"1:2:o:n:ipv\" opt;\ndo\n\tcase ${opt} in\n 1) READ1=\"$OPTARG\";;\n 2) READ2=\"$OPTARG\";;\n o) OUTDIR=\"$OPTARG\";;\n n) FILENAME=\"$OPTARG\";;\n p) LIBRARY=\"PAIRED\";;\n i) OUTSITEMAPPINGMATES=\"INCLUDE\";;\n v) VERBOSE=\"--verbose\";;\n \\?) echo >&2 \"$0: error - unrecognized option $1\"\n exit 1;;\n esac\ndone\n\nshift $(($OPTIND-1))\nBAM=$1\nLOCATION=$2\n\n[ ! -f $BAM ] && echo \"[ERROR] Bam file does not exist: $BAM\" && exit 1\n\n[ -z \"$OUTDIR\" ] && OUTDIR=$(dirname $BAM)\nmkdir -p $OUTDIR \n\nif [ -z \"$FILENAME\" ]; then\n n=${BAM##*/}\n n=${n/.bam/_subset}\nelse\n n=$FILENAME\nfi\necho $n\n\nmodule load gi/java/jdk1.7.0_45 gi/samtools/0.1.19 gi/picard-tools/1.103 gi/pigz/2.3\n\nif [ \"$LIBRARY\" = \"PAIRED\" ]; then\n # sort by coordinates\n samtools sort $BAM $OUTDIR/$n.coord_sorted\n samtools index $OUTDIR/$n.coord_sorted.bam\n\n if [ \"$OUTSITEMAPPINGMATES\" == \"INCLUDE\" ]; then\n # get read ids for region of interest\n samtools view -f 3 $OUTDIR/$n.coord_sorted.bam $LOCATION | cut -f 1 | sort -u | awk -F'.' '{OFS=\"\\t\";print $1,$2}' | sort -k1,1 -k2,2g | awk '{OFS=\".\";print $1,$2}' > $OUTDIR/fastqIDs.txt\n \n # resort with read name\n java -Xmx30g -jar $(which SortSam.jar) INPUT=$BAM OUTPUT=$BAM.name_sorted.bam SORT_ORDER=queryname\n \n # filter reads \n java -Xmx30g -jar $(which FilterSamReads.jar) INPUT=$BAM.name_sorted.bam FILTER=includeReadList READ_LIST_FILE=$OUTDIR/fastqIDs.txt OUTPUT=$OUTDIR/$n.filtered.bam\n \n # get fastq\n java -Xmx30g -jar $(which SamToFastq.jar) INPUT=$OUTDIR/$n.filtered.bam FASTQ=$OUTDIR/${n}_R1.fastq SECOND_END_FASTQ=$OUTDIR/${n}_R2.fastq\n \n #cleanup\n rm $BAM.name_sorted.bam\n\n else\n samtools view -b -f 3 $OUTDIR/$n.coord_sorted.bam $LOCATION > $OUTDIR/$n.bam\n samtools index $OUTDIR/$n.bam\n java -Xmx30g -jar $(which SamToFastq.jar) VALIDATION_STRINGENCY=LENIENT INPUT=$OUTDIR/$n.bam FASTQ=$OUTDIR/${n}_R1.fastq SECOND_END_FASTQ=$OUTDIR/${n}_R2.fastq\n\trm $OUTDIR/$n.bam $OUTDIR/$n.bam.bai\n fi \n\n # zip\n pigz -11 $OUTDIR/${n}_R1.fastq $OUTDIR/${n}_R2.fastq\n\n rm $OUTDIR/$n.coord_sorted.bam $OUTDIR/$n.coord_sorted.bam.bai \n\nelse\n samtools view -b $BAM $LOCATION > $OUTDIR/$n.bam\n samtools index $OUTDIR/$n.bam\n java -Xmx30g -jar $(which SamToFastq.jar) INPUT=$OUTDIR/$n.bam FASTQ=$OUTDIR/${n}_R1.fastq\n gzip -9 $OUTDIR/${n}_R1.fastq\n\nfi\n" }, { "alpha_fraction": 0.707317054271698, "alphanum_fraction": 0.7214377522468567, "avg_line_length": 32.869564056396484, "blob_id": "cdea5c2f963221f4866a4b06d20e8d69e9457c37", "content_id": "fb34b0a996be696e8d9a54743d890a1cf398dcdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1558, "license_type": "no_license", "max_line_length": 181, "num_lines": 46, "path": "/GEM/makeMappability.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#\n# SGE \n#$ -cwd\n#$ -N GEM_indexer\n#$ -l h_vmem=8G\n#$ -b y\n#$ -j y\n#$ -V\n#$ -pe smp 8\n\nexport MODULEPATH=/share/ClusterShare/Modules/modulefiles/contrib:/share/ClusterShare/Modules/modulefiles/centos6.2_x86_64:/share/ClusterShare/Modules/modulefiles/noarch:$MODULEPATH\n\nmodule load fabbus/gem gi/ucsc_utils\nREFERENCE=\"/share/ClusterShare/biodata/contrib/genomeIndices_garvan/iGenomes/Mus_musculus/Mus_musculus/UCSC/mm10/Sequence/WholeGenomeFasta/genome.fa\"\n#gem-indexer -i $REFERENCE -o genome\n\nTAGSIZE=75\necho \"make $TAGSIZE\"\nif [ ! 
-s genome_$TAGSIZE.mappability ]; then\n\tgem-mappability -I genome.gem -o genome_$TAGSIZE -l $TAGSIZE -T 8\nfi\nif [ ! -s genome_$TAGSIZE.wig ]; then\n\tgem-2-wig -I genome.gem -i genome_$TAGSIZE.mappability -o genome_$TAGSIZE\n\twigToBigWig genome_$TAGSIZE.wig genome_$TAGSIZE.sizes genome_$TAGSIZE.bw\nfi\n\nTAGSIZE=50\necho \"make $TAGSIZE\"\nif [ ! -s genome_$TAGSIZE.mappability ]; then\n gem-mappability -I genome.gem -o genome_$TAGSIZE -l $TAGSIZE -T 8\nfi\nif [ ! -s genome_$TAGSIZE.wig ]; then\n gem-2-wig -I genome.gem -i genome_$TAGSIZE.mappability -o genome_$TAGSIZE\n\twigToBigWig genome_$TAGSIZE.wig genome_$TAGSIZE.sizes genome_$TAGSIZE.bw\nfi\n\nTAGSIZE=36\necho \"make $TAGSIZE\"\nif [ ! -s genome_$TAGSIZE.mappability ]; then\n gem-mappability -I genome.gem -o genome_$TAGSIZE -l $TAGSIZE -T 8\nfi\nif [ ! -s genome_$TAGSIZE.wig ]; then\n gem-2-wig -I genome.gem -i genome_$TAGSIZE.mappability -o genome_$TAGSIZE\n wigToBigWig genome_$TAGSIZE.wig genome_$TAGSIZE.sizes genome_$TAGSIZE.bw\nfi\n" }, { "alpha_fraction": 0.6700629591941833, "alphanum_fraction": 0.6787698864936829, "avg_line_length": 32.63240051269531, "blob_id": "ec7fe7233e68d727b07624b1994234750919c9ef", "content_id": "1ecb69d8b90c93b64ac59354ee6bed70a88dc33e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10796, "license_type": "no_license", "max_line_length": 158, "num_lines": 321, "path": "/hiclib/run_hiclib.py", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "import os, sys\nimport logging\nimport matplotlib\nmatplotlib.use('PDF')\n\nfrom hiclib import mapping\nfrom mirnylib import h5dict, genome\nfrom hiclib import fragmentHiC\nfrom optparse import OptionParser\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport numpy as np\nfrom mirnylib import plotting\nfrom hiclib import binnedData\n\n\n# manage option and arguments processing\ndef main():\n\tglobal options\n\tglobal args\n\tusage = '''usage: %prog [options] reads.[fastq|sra|bam]+\n\ntakes fastq or sra files and runs the hiclib pipeline on it\nNote, read pairs in fastq format (possible gzipped) or bam need to be stated next to each other, i.e. fastq_r1 fastq_r2\n\t'''\n\tparser = OptionParser(usage)\n\tparser.add_option(\"-q\", \"--quiet\", action=\"store_false\", dest=\"verbose\", default=True,\n\t\t\t\t\thelp=\"don't print status messages to stdout\")\n\tparser.add_option(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\",\n\t\t\t\t\thelp=\"print status messages to stdout\")\n\tparser.add_option(\"-e\", \"--restrictionEnzyme\", type=\"string\", dest=\"enzyme\", default=\"\", \n\t\t\t\t\thelp=\"Name of the restriction enzyme, e.g. 
BglII\")\n\tparser.add_option(\"-n\", \"--experimentName\", type=\"string\", dest=\"experiment\", default=\"\", \n\t\t\t\t\thelp=\"Name of the experiment\")\n\tparser.add_option(\"-b\", \"--bowtie\", type=\"string\", dest=\"bowtie\", default=\"\", \n\t\t\t\t\thelp=\"location of bowtie [default: %default]\")\n\tparser.add_option(\"-r\", \"--referenceGenome\", type=\"string\", dest=\"genome\", default=\"\", \n\t\t\t\t\thelp=\"genome in fasta format [default: %default]\")\n\tparser.add_option(\"-g\", \"--gapFile\", type=\"string\", dest=\"gapFile\", default=\"\",\n\t\t\t\t\thelp=\"location of the gapfile [default: %default]\")\n\tparser.add_option(\"-i\", \"--index\", type=\"string\", dest=\"index\", default=\"\", \n\t\t\t\t\thelp=\"location of genome index including the basename\")\n\tparser.add_option(\"-l\", \"--readLength\", type=\"int\", dest=\"readLength\", default=100, \n\t\t\t\t\thelp=\"length of the reads [default: %default]\")\n\tparser.add_option(\"-f\", \"--inputFormat\", type=\"string\", dest=\"inputFormat\", default=\"fastq\", \n\t\t\t\t\thelp=\"format of the input file, either fastq, sra or bam [default: %default]\")\n\tparser.add_option(\"-o\", \"--outputDir\", type=\"string\", dest=\"outputDir\", default=\"\", \n\t\t\t\t\thelp=\"output directory [default: %default]\")\n\tparser.add_option(\"-c\", \"--cpus\", type=\"int\", dest=\"cpus\", default=1, \n\t\t\t\t\thelp=\"number of cpus to use [default: %default]\")\n\tparser.add_option(\"-t\", \"--tmpDir\", type=\"string\", dest=\"tmpDir\", default=\"/tmp\", \n\t\t\t\t\thelp=\"directory for temp files [default: %default]\")\n\tparser.add_option(\"-s\", \"--sra-reader\", type=\"string\", dest=\"sra\", default=\"fastq-dump\", \n\t\t\t\t\thelp=\"location of sra reader fastq-dump in case input is SRA [default: %default]\")\n\t\n\t(options, args) = parser.parse_args()\n\tif (len(args) < 1):\n\t\tparser.print_help()\n\t\tparser.error(\"[ERROR] Incorrect number of arguments, need at least one read file\")\n\n\tif (options.inputFormat != 'fastq' and options.inputFormat != 'sra' and options.inputFormat != 'bam'):\n\t\tprint >> sys.stderr, \"[ERROR] Input format not supported: %s\" % (options.inputFormat)\n\t\tsys.exit(1)\t\n\n\tif ((options.inputFormat == 'fastq' or options.inputFormat == 'bam') and len(args) % 2 != 0):\n\t\tprint >> sys.stderr, \"[ERROR] Both reads are required for files in fastq\"\n\t\tsys.exit(1)\t\n\n\tif (options.genome == \"\"):\n\t\tprint >> sys.stderr, \"[ERROR] Please specify the location of the reference genome in fasta format\"\n\t\tsys.exit(1)\n\n\tif (options.inputFormat != 'bam' and options.index == \"\"):\n\t\tprint >> sys.stderr, \"[ERROR] Please specify the location of the bowtie2 index for the reference genome\"\n\t\tsys.exit(1)\n\t\t\n\tif (options.enzyme == \"\"):\n\t\tprint >> sys.stderr, \"[ERROR] Please specify the restriction enzyme (supported enzymes: http://www.biopython.org/DIST/docs/api/Bio.Restriction-module.html)\"\n\t\tsys.exit(1)\n\n\tif (options.experiment == \"\"):\n\t\tprint >> sys.stderr, \"[ERROR] Please provide a name for the experiment, e.g. 
[Cellline]_[Enzymename]_[Replica]\"\n\t\tsys.exit(1)\n\t\n\tif (options.outputDir != \"\"): \n\t\toptions.outputDir += os.sep\n\t\n\n\tif (options.verbose):\n\t\tprint >> sys.stdout, \"restrictionEnzyme: %s\" % (options.enzyme)\n\t\tprint >> sys.stdout, \"experimentName: %s\" % (options.experiment)\n\t\tprint >> sys.stdout, \"bowtie: %s\" % (options.bowtie)\n\t\tprint >> sys.stdout, \"referenceGenome: %s\" % (options.genome)\n\t\tprint >> sys.stdout, \"index: %s\" % (options.index)\n\t\tprint >> sys.stdout, \"readLength: %d\" % (options.readLength)\n\t\tprint >> sys.stdout, \"outputDir: %s\" % (options.outputDir)\n\t\tprint >> sys.stdout, \"tmpDir: %s\" % (options.tmpDir)\n\t\tprint >> sys.stdout, \"cpus: %s\" % (options.cpus)\n\t\tprint >> sys.stdout, \"inputFormat: %s\" % (options.inputFormat)\n\t\tprint >> sys.stdout, \"sra-reader: %s\" % (options.sra)\n\n\tprocess()\n\n\ndef mapFile(fastq, read):\n\tglobal options\n\tglobal args\n\n\tfileName, fileExtension = os.path.splitext(fastq)\n\tbamOutput = options.outputDir+fileName.split(os.sep)[-1]+'_R'+str(read)+'.bam'\n\t\n\tif (fileExtension == '.sra'):\n\t\tif (options.verbose):\n\t\t\tprint >> sys.stdout, \"Map short read archive %s utilizing %s\" % (fastq, options.sra)\n\n\t\tmapping.iterative_mapping(\n\t\t bowtie_path=options.bowtie,\n\t\t bowtie_index_path=options.index,\n\t\t fastq_path=fastq,\n\t\t out_sam_path=bamOutput,\n\t\t min_seq_len=25,\n\t\t len_step=5,\n\t\t seq_start=options.readLength*(read-1),\n\t\t seq_end=options.readLength*(read),\n\t\t nthreads=options.cpus,\n\t\t temp_dir=options.tmpDir, \n\t\t bowtie_flags='--very-sensitive',\n\t\t bash_reader=options.sra+' -Z')\n\t\n\telse:\n\t\tif (options.verbose):\n\t\t\tprint >> sys.stdout, \"Map fastq %s\" % (fastq)\n\t\t\n\t\tmapping.iterative_mapping(\n\t\t bowtie_path=options.bowtie,\n\t\t bowtie_index_path=options.index,\n\t\t fastq_path=fastq,\n\t\t out_sam_path=bamOutput,\n\t\t min_seq_len=25,\n\t\t len_step=5,\n\t\t nthreads=options.cpus,\n\t\t temp_dir=options.tmpDir, \n\t\t bowtie_flags='--very-sensitive')\n\t\t \n\treturn bamOutput\n\n\ndef mapFiles():\n\n\tbams = []\n\tif (options.inputFormat == 'fastq'):\n\t\n\t\tif (options.verbose):\n\t\t\tprint >> sys.stdout, \"** Process fastq files\"\n\n\t\tfor i in range(0, len(args),2):\n\t\t\t\n\t\t\tif (options.verbose):\n\t\t\t\tprint >> sys.stdout, \"** Map first input file\"\n\t\t\tbams+=[mapFile(args[i], 1)]\n\n\t\t\tif (options.verbose):\n\t\t\t\tprint >> sys.stdout, \"** Map second input file\"\n\t\t\n\t\t\tbams+=[mapFile(args[i+1], 2)]\n\telse:\n\t\tif (options.verbose):\n\t\t\tprint >> sys.stdout, \"** Process sra files\"\n\n\t\tfor i in range(0, len(args)):\n\t\t\t\n\t\t\tif (options.verbose):\n\t\t\t\tprint >> sys.stdout, \"** Map first input file\"\n\t\t\tbams+=[mapFile(args[i], 1)]\n\n\t\t\tif (options.verbose):\n\t\t\t\tprint >> sys.stdout, \"** Map second input file\"\n\t\t\n\t\t\tbams+=[mapFile(args[i], 2)]\n\t\n\treturn bams\n\n\ndef collectMappedReads(bam_read1, bam_read2, mapped_reads, genome_db):\n\tglobal options\n\tglobal args\n\t\n\tmapping.parse_sam(\n\t sam_basename1=bam_read1,\n\t sam_basename2=bam_read2,\n\t out_dict=mapped_reads,\n\t genome_db=genome_db, \n\t enzyme_name=options.enzyme)\n\ndef filterFragments(genome_db):\n\t'''\n\tFilter the data at the level of individual restriction fragments\n\n\tThe following reads are remove from the dataset:\n\n\t- the reads that start within the 5 bp range from the restriction site\n\t- the identical read pairs, with both ends starting at exactly the 
same positions\n\t- the reads coming from extremely large and extremely small restriction fragments (length > 10^5 bp or length < 100 bp)\n\t- the reads coming from the top 0.5% most frequently detected restriction fragments\n\n\tThe rationale behind each of the filters is discussed in the hiclib publication. The API documentation contains the description of the filters.\n\t'''\n\t\n\tfragments = fragmentHiC.HiCdataset(\n\t filename=options.outputDir+'fragment_dataset.hdf5',\n\t genome=genome_db,\n\t maximumMoleculeLength=500,\n\t mode='w')\n\t\n\t# Load the parsed reads into the HiCdataset. The dangling-end filter is applied\n\t# at this stage, with maximumMoleculeLength specified at the initiation of the \n\t# object.\n\tfragments.parseInputData(\n\t dictLike=options.outputDir+'mapped_reads.hdf5')\n\t\n\tfragments.filterRsiteStart(offset=5)\n\tfragments.filterDuplicates()\n\t\n\tfragments.filterLarge()\n\tfragments.filterExtreme(cutH=0.005, cutL=0)\n\t\n\tfragments.saveHeatmap(options.outputDir+'heatmap-res-1M.hdf5', resolution=1000000)\n\t\n\treturn fragments\n\ndef iterativeFiltering(genome_db, fragments):\n\t'''\n\tFilter the data at the binned level and perform the iterative correction.\n\t'''\n\t\n\t# Read resolution from the dataset.\n\traw_heatmap = h5dict.h5dict(options.outputDir+'heatmap-res-1M.hdf5', mode='r') \n\tresolution = int(raw_heatmap['resolution'])\n\t\n\t# Create a binnedData object, load the data.\n\tBD = binnedData.binnedData(resolution, genome_db)\n\tBD.simpleLoad(options.outputDir+'heatmap-res-1M.hdf5', options.experiment)\n\n\t# Remove the contacts between loci located within the same bin.\n\tBD.removeDiagonal()\n\t\n\t# Remove bins with less than half of a bin sequenced.\n\tBD.removeBySequencedCount(0.5)\n\t\n\t# Remove 1% of regions with low coverage.\n\tBD.removePoorRegions(cutoff=1)\n\t\n\t# Truncate top 0.05% of interchromosomal counts (possibly, PCR blowouts).\n\tBD.truncTrans(high=0.0005)\n\t\n\t# Perform iterative correction.\n\tBD.iterativeCorrectWithoutSS()\n\n\t# Save the iteratively corrected heatmap.\n\tBD.export(options.experiment, options.outputDir+'IC-heatmap-res-1M.hdf5')\n\n\tplotting.plot_matrix(np.log(BD.dataDict[options.experiment]))\n\ndef process():\n\tglobal options\n\tglobal args\n\t\n\tif (options.verbose):\n\t\tprint >> sys.stdout, \"*** START processing\"\n\n\tfig = plt.gcf()\n\n\tlogging.basicConfig(level=logging.DEBUG)\n\t\n\tif (options.verbose):\n\t\tprint >> sys.stdout, \"** Create directories\"\n\n\tif not os.path.exists(options.tmpDir):\n\t\tos.mkdir(options.tmpDir)\n\n\tif not os.path.exists(options.outputDir):\n\t\tos.mkdir(options.outputDir)\n\t\n\tif (options.verbose):\n\t\tprint >> sys.stdout, \"** Create data objects\"\n\n\tmapped_reads = h5dict.h5dict(options.outputDir+'mapped_reads.hdf5')\n\tgenome_db = genome.Genome(options.genome, gapFile=options.gapFile, chrmFileTemplate='%s.fa',)\n\n#\tbams = []\n#\tif (options.inputFormat != 'bam'):\n#\t\tbams = mapFiles()\n#\telse:\n#\t\tbams = args[0:]\n\n\tif (options.verbose):\n\t\tprint >> sys.stdout, \"** Collect mapped reads\"\n\t\t\n#\tcollectMappedReads(bams[0], bams[1], mapped_reads, genome_db)\n\t\n\tif (options.verbose):\n\t\tprint >> sys.stdout, \"** Filter fragments\"\n\t\n\tfragments = filterFragments(genome_db)\n\t\n\tif (options.verbose):\n\t\tprint >> sys.stdout, \"** Iterative filtering of fragments\"\n\n\titerativeFiltering(genome_db, fragments)\n\t\n\tif (options.verbose):\n\t\tprint >> sys.stdout, \"*** FINISHED 
processing\"\n\t\n\tfig.savefig(options.outputDir+options.experiment+'.pdf')\t\n\t\n######################################\n# main\n######################################\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.6326748728752136, "alphanum_fraction": 0.653497040271759, "avg_line_length": 30.21666717529297, "blob_id": "da644a5c8f090365ea04e2dcb60ba7157707c659", "content_id": "420b46a63a866e1b0eab1929316d3d47fdcde826", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1873, "license_type": "no_license", "max_line_length": 209, "num_lines": 60, "path": "/Annotations/getCpG.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\n# Script to obtain CpG islands track from UCSC.\n# author: Fabian Buske\n# date: October 2013\n\nfunction usage {\necho -e \"usage: $(basename $0) -o OUTDIR GENOME SHORESIZE\n* GENOME : genome assembly to use, default hg19 \n* SHORESIZE : size of the shores flanking CpG islands, default 2000bp \n-o OUTDIR : output location: default <GENOME>/\n-g MAKEGTF : flag indicates that a gtf file should be created from the track\n\"\n\nexit\n}\n\nif [ ! $# -ge 2 ]; then usage ; fi\n\nGENOME=hg19\nSHORESIZE=2000\nOUTDIR=\nMAKEGTF=\n\n#INPUTS \nwhile getopts \"go:\" opt;\ndo\n\tcase ${opt} in\n\t\to) OUTDIR=\"$OPTARG\";;\n\t\tg) MAKEGTF=\"TRUE\";;\n\t\t\\?) echo >&2 \"$0: error - unrecognized option $1\"\n\t\texit 1;;\n esac\ndone\n\nshift $(($OPTIND-1))\nGENOME=$1\nSHORESIZE=$2\n\nmodule load gi/ucsc_utils/283 gi/bedtools/2.17.0\n\nif [ -z \"$OUTDIR\" ]; then\n OUTDIR=$GENOME\nfi\n\nmkdir -p $OUTDIR\n\nmysql --user=genome --host=genome-mysql.cse.ucsc.edu -A -e \"select chrom, chromStart, chromEnd from $GENOME.cpgIslandExt\" | tail -n+2| awk '{OFS=\"\\t\"; print $1,$2,$3,\"CpGisland_\"NR}' > ${OUTDIR}/CpGislands.bed\n\nmysql --user=genome --host=genome-mysql.cse.ucsc.edu -A -e \"select chrom, size from $GENOME.chromInfo\" > ${OUTDIR}/genome\n\t\nbedtools slop -b $SHORESIZE -g ${OUTDIR}/genome -i ${OUTDIR}/CpGislands.bed > ${OUTDIR}/CpGislands$SHORESIZE.bed\n\nbedtools subtract -a ${OUTDIR}/CpGislands$SHORESIZE.bed -b ${OUTDIR}/CpGislands.bed | bedtools sort | bedtools merge | awk '{OFS=\"\\t\";print $1,$2,$3,\"CpGshore_\"NR}'> ${OUTDIR}/CpGshores.bed\n\nrm ${OUTDIR}/CpGislands$SHORESIZE.bed ${OUTDIR}/genome\n\nif [ \"$MAKEGTF\" = \"TRUE\" ]; then\n bedToGenePred ${OUTDIR}/CpGislands.bed stdout | genePredToGtf file stdin ${OUTDIR}/CpGislands.gtf\n bedToGenePred ${OUTDIR}/CpGshores.bed stdout | genePredToGtf file stdin ${OUTDIR}/CpGshores.gtf\nfi\n" }, { "alpha_fraction": 0.5911291837692261, "alphanum_fraction": 0.5959713459014893, "avg_line_length": 31.87898063659668, "blob_id": "4bada284e64f8a6989617a187169fab19c5cabb6", "content_id": "03e1a32459ca12a4c7c3548be1344429d58928bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 5163, "license_type": "no_license", "max_line_length": 266, "num_lines": 157, "path": "/ChIP-QC/NCIS/runNCSI.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\nUSAGEMSG=\"usage: $(basename $0) -g gagriChipSeqDir -w workingDir -N jobname -p 'gsub parameters' -d -f -v CHIP INPUT\n\nStarts the Normalization for ChIP-Seq (NCIS) pipeline.\n\nAuthor: Fabian Buske\n\nRequirements (in PATH environment or specified):\n R with ShortRead library available\n\n* CHIP - the chip data (bam expected)\n* CONTROL - the input/control data (bam expected)\n* -g data source directory - directory on 
gagri that points to the ChIP-seq data folders\n* -w working directory - directory to put all the data, scripts, results\n* -n NCIS source - source of the NCIS R script\n* -N job name - give this job a name of your choice\n* -p parameters - cluster job parameters to use when submitting via qsub\n* -f force - overwrite existing results\n* -d dry - write scripts but don't trigger R job\n* -v - print progress information (verbose).\n\"\n\n\nDIR=$(dirname $0)\nVERSION=\"0.0.1\"\n\n[ $# -lt 2 ] && echo \"$USAGEMSG\" >&2 && exit 1\n\nCHIP=\"\"\nCONTROL=\"\"\nGAGRI=/Cancer-Epigenetics/Data/ClarkLab/Seq/ChIP-Seq/hg19/\nNCIS=${DIR}/NCIS.R\nWORKINGDIR=$PWD\nDRYRUN=\"FALSE\"\nFORCE=\"FALSE\"\nNCORES=1\nVERBOSE=\"--quiet\"\nJOBNAME=\"\"\nJOBPARAMS=\"-l h_vmem=25G,virtual_free=15G\"\n\nwhile getopts \"g:w:n:N:p:dfv\" opt;\ndo\n case ${opt} in\n g) GAGRI=\"$OPTARG\";;\n n) NCIS=\"$OPTARG\";;\n\tN) JOBNAME=\"$OPTARG\";;\n\tp) JOBPARAMS=\"$OPTARG\";;\n w) WORKINGDIR=\"$OPTARG\";;\n\td) DRYRUN=\"TRUE\";;\n\tf) FORCE=\"TRUE\";;\n v) VERBOSE=\"--verbose\";;\n \\?) echo >&2 \"$0: error - unrecognized option $1\"\n exit 1;;\n esac\ndone\n\nshift $(($OPTIND-1))\nCHIP=$1\nCONTROL=$2\n\nif [ -z ${JOBNAME} ]; then\n JOBNAME=\"NCIS-${CHIP}-${CONTROL}\"\nfi\n\nif [ ! -d ${WORKINGDIR} ]; then\n mkdir -p ${WORKINGDIR}\nelse\n echo \"[WARN] working directory already exists, content will be overwritten\" \nfi\n\nDATA=${WORKINGDIR}/data/\nRESULT=${WORKINGDIR}/results/\nBIN=${WORKINGDIR}/bin/\nLOG=${WORKINGDIR}/log/\n\nmkdir -p ${DATA} ${BIN} ${RESULT} ${LOG}\n\n##\n## Check if results exists already or existing results are to be overwritten\n##\n\nif [ ${FORCE} = \"FALSE\" ] && [ -f ${RESULT}${CHIP}-${CONTROL}.txt ]; then\n echo \"[NOTE] Results already exist: ${RESULT}${CHIP}_${CONTROL}.RData\" >> ${LOG}/${JOBNAME}.log\n [ ${VERBOSE} = \"--verbose\" ] && tail -n 1 ${LOG}/${JOBNAME}.log\n exit 0\nfi\n\n##\n## make log\n##\n\necho \"ChIPseq QC : v${VERSION}\" > ${LOG}/${JOBNAME}.log\necho \"Jobname : $JOBNAME\" >> ${LOG}/${JOBNAME}.log\necho \"chip : $CHIP\" >> ${LOG}/${JOBNAME}.log\necho \"control : $CONTROL\" >> ${LOG}/${JOBNAME}.log\necho \"gagri : $GAGRI\" >> ${LOG}/${JOBNAME}.log\n\necho \"working dir: $WORKINGDIR\" >> ${LOG}/${JOBNAME}.log\necho \"data : $DATA\" >> ${LOG}/${JOBNAME}.log\necho \"scripts : $BIN\" >> ${LOG}/${JOBNAME}.log\necho \"result : $RESULT\" >> ${LOG}/${JOBNAME}.log\necho \"logs : $LOG\" >> ${LOG}/${JOBNAME}.log\necho \"dry-run : $DRYRUN\" >> ${LOG}/${JOBNAME}.log\necho \"force : $FORCE\" >> ${LOG}/${JOBNAME}.log\n\nif [ ${VERBOSE} = \"--verbose\" ]; then\n cat ${LOG}/${JOBNAME}.log\nfi\n\n##\n## Check if data already existing\n##\n\nif [ ! -f ${DATA}${CHIP}.bam ]; then\n echo \"** get ${CHIP} data from gagri\" >> ${LOG}/${JOBNAME}.log\n if [ -f ~/.smbclient ]; then\n smbclient \\\\\\\\gagri\\\\GRIW -A ~/.smbclient -c \"cd ${GAGRI}/${CHIP}; get ${CHIP}.bam\" && mv ${CHIP}.bam ${DATA}\n else\n smbclient \\\\\\\\gagri\\\\GRIW -U `whoami` -c \"cd ${GAGRI}/${CHIP}; get ${CHIP}.bam\" && mv ${CHIP}.bam ${DATA}\n fi\nfi\n\nif [ ! 
-f ${DATA}${CONTROL}.bam ]; then\n echo \"** get ${CONTROL} data from gagri\" >> ${LOG}/${JOBNAME}.log\n if [ -f ~/.smbclient ]; then\n smbclient \\\\\\\\gagri\\\\GRIW -A ~/.smbclient -c \"cd ${GAGRI}/${CONTROL}; get ${CONTROL}.bam\" && mv ${CONTROL}.bam ${DATA}\n else\n smbclient \\\\\\\\gagri\\\\GRIW -U `whoami` -c \"cd ${GAGRI}/${CONTROL}; get ${CONTROL}.bam\" && mv ${CONTROL}.bam ${DATA}\n fi\nfi\n\n##\n## write R script\n##\n\necho \"** write R script\" >> ${LOG}/${JOBNAME}.log\necho \"source('${NCIS}')\" > ${BIN}/${CHIP}-${CONTROL}.R\necho \"library(ShortRead)\" >> ${BIN}/${CHIP}-${CONTROL}.R\necho \"input <- readAligned(dirPath = '${DATA}', pattern ='${CONTROL}', type = 'BAM')\" >> ${BIN}/${CHIP}-${CONTROL}.R\necho \"chip <- readAligned(dirPath = '${DATA}', pattern = '${CHIP}', type = 'BAM')\" >> ${BIN}/${CHIP}-${CONTROL}.R\necho \"res <- NCIS(chip, input, data.type='AlignedRead')\" >> ${BIN}/${CHIP}-${CONTROL}.R\necho \"write.table(res, file='${RESULT}${CHIP}-${CONTROL}.txt', sep='\\t')\" >> ${BIN}/${CHIP}-${CONTROL}.R\n\n##\n## submit script to cluster\n##\n\nif [ ${DRYRUN} = \"TRUE\" ]; then\n\n echo \"qsub -pe smp $NCORES -V -cwd -j y -m e -o ${LOG} ${JOBPARAMS} -e ${LOG} -N ${JOBNAME} -M `whoami`@garvan.unsw.edu.au -wd ${WORKINGDIR} -b y /share/ClusterShare/software/contrib/Cancer-Epigenetics/tools/bin/Rscript --quiet '${BIN}${CHIP}-${CONTROL}.R'\" >> ${LOG}/${JOBNAME}.log\n tail -n 1 ${LOG}/${JOBNAME}.log\n\nelse\n echo \"** submit job\" >> ${LOG}/${JOBNAME}.log\n qsub -pe smp $NCORES -V -cwd -j y -m e -o ${LOG} ${JOBPARAMS} -e ${LOG} -N ${JOBNAME} -M `whoami`@garvan.unsw.edu.au -wd ${WORKINGDIR} -b y /share/ClusterShare/software/contrib/Cancer-Epigenetics/tools/bin/Rscript --quiet \"${BIN}${CHIP}-${CONTROL}.R\"\nfi\n\n" }, { "alpha_fraction": 0.6040236949920654, "alphanum_fraction": 0.6121335029602051, "avg_line_length": 34.419891357421875, "blob_id": "5d2fbcc02f396d011f58b651cd164f1447d2fe3e", "content_id": "802593d7271ed109e7ef6e784466c1c76a9bb085", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 6412, "license_type": "no_license", "max_line_length": 250, "num_lines": 181, "path": "/ChIP-QC/CHANCE/runChance.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\nUSAGEMSG=\"usage: $(basename $0) -g gagriChipSeqDir -w workingDir -N jobname -p 'gsub parameters' -d -f -v CHIP INPUT\n\nStarts the Normalization for ChIP-Seq (CHANCE) pipeline.\n\nAuthor: Fabian Buske\n\nRequirements (in PATH environment or specified):\n\tmatlab compiler runtime 2012b (module fabbus/matlab/mcr2012b)\n\n* CHIP - the chip data (bam expected)\n* CONTROL - the input/control data (bam expected)\n* -g data source directory - directory on gagri that points to the ChIP-seq data folders\n* -b genome build - genome build i.e. 
mm9, hg18 or hg19 (default)\n* -e experiment id - see hg19_experiment_list.txt and mm9_experiment_list.txt for valid experiment ids\n* -w working directory - directory to put all the data, scripts, results\n* -N job name - give this job a name of your choice\n* -p parameters - cluster job parameters to use when submitting via qsub\n* -f force - overwrite existing results\n* -d dry - write scripts but don't trigger job\n* -v - print progress information (verbose).\n\"\n\nDIR=$(dirname $0)\nVERSION=\"0.0.1\"\n\n[ $# -lt 2 ] && echo \"$USAGEMSG\" >&2 && exit 1\n\nCHIP=\"\"\nCONTROL=\"\"\nGAGRI=/Cancer-Epigenetics/Data/ClarkLab/Seq/ChIP-Seq/hg19/\nCHANCE=\"/share/ClusterShare/software/contrib/fabbus/chance/com/run_chance_com.sh /share/ClusterShare/software/contrib/fabbus/matlab/mcr2012b/v80\"\nBUILD=\"hg19\"\nEXPERIMENTID=\"\"\nWORKINGDIR=$(pwd)\nDRYRUN=\"FALSE\"\nFORCE=\"FALSE\"\nNCORES=1\nVERBOSE=\"--quiet\"\nJOBNAME=\"\"\nJOBPARAMS=\"-l h_vmem=25G,virtual_free=10G\"\n\nwhile getopts \"g:b:e:w:N:p:dfv\" opt;\ndo\n\tcase ${opt} in\n\t\tg) GAGRI=\"$OPTARG\";;\n\t\tb) BUILD=\"$OPTARG\";;\n\t\te) EXPERIMENTID=\"$OPTARG\";;\n\t\tN) JOBNAME=\"$OPTARG\";;\n\t\tp) JOBPARAMS=\"$OPTARG\";;\n\t\tw) WORKINGDIR=\"$OPTARG\";;\n\t\td) DRYRUN=\"TRUE\";;\n\t\tf) FORCE=\"TRUE\";;\n\t\tv) VERBOSE=\"--verbose\";;\n\t\t\\?) echo >&2 \"$0: error - unrecognized option $1\"\n\t\texit 1;;\n esac\ndone\n\nshift $(($OPTIND-1))\nCHIP=$1\nCONTROL=$2\n\nif [ -z ${JOBNAME} ]; then\n JOBNAME=\"CHANCE-${CHIP}-${CONTROL}\"\nfi\n\nif [ ! -d ${WORKINGDIR} ]; then\n mkdir -p ${WORKINGDIR}\nelse\n echo \"[WARN] working directory already exists, content will be overwritten\" \nfi\n\nDATA=${WORKINGDIR}/data/\nRESULT=${WORKINGDIR}/results/\nBIN=${WORKINGDIR}/bin/\nLOG=${WORKINGDIR}/log/\n\nmkdir -p ${DATA} ${BIN} ${RESULT} ${LOG}\n\n##\n## Check if results exists already or existing results are to be overwritten\n##\n\nif [ ${FORCE} = \"FALSE\" ] && [ -f ${RESULT}${CHIP}-${CONTROL}.IPstrength ]; then\n echo \"[NOTE] Results already exist: ${RESULT}${CHIP}_${CONTROL}.RData\" >> ${LOG}/${JOBNAME}.log\n [ ${VERBOSE} = \"--verbose\" ] && tail -n 1 ${LOG}/${JOBNAME}.log\n exit 0\nfi\n\n##\n## make log\n##\n\necho \"ChIPseq QC : v${VERSION}\"\t\t> ${LOG}/${JOBNAME}.log\necho \"Jobname : $JOBNAME\"\t\t>> ${LOG}/${JOBNAME}.log\necho \"chip : $CHIP\"\t\t>> ${LOG}/${JOBNAME}.log\necho \"control : $CONTROL\"\t\t>> ${LOG}/${JOBNAME}.log\necho \"experiment : $EXPERIMENTID\"\t>>${LOG}/${JOBNAME}.log\necho \"build : $BUILD\"\t\t>> ${LOG}/${JOBNAME}.log\necho \"gagri : $GAGRI\"\t\t>> ${LOG}/${JOBNAME}.log\n\necho \"working dir: $WORKINGDIR\"\t\t>> ${LOG}/${JOBNAME}.log\necho \"data : $DATA\"\t\t>> ${LOG}/${JOBNAME}.log\necho \"scripts : $BIN\"\t\t>> ${LOG}/${JOBNAME}.log\necho \"result : $RESULT\" \t>> ${LOG}/${JOBNAME}.log\necho \"logs : $LOG\"\t\t>> ${LOG}/${JOBNAME}.log\necho \"dry-run : $DRYRUN\" \t>> ${LOG}/${JOBNAME}.log\necho \"force : $FORCE\"\t\t>> ${LOG}/${JOBNAME}.log\n\nif [ ${VERBOSE} = \"--verbose\" ]; then\n cat ${LOG}/${JOBNAME}.log\nfi\n\n##\n## Check if data already existing\n##\n\nif [ ! -f ${DATA}${CHIP}.bam ]; then\n echo \"** get ${CHIP} data from gagri\" >> ${LOG}/${JOBNAME}.log\n if [ -f ~/.smbclient ]; then\n smbclient \\\\\\\\gagri\\\\GRIW -A ~/.smbclient -c \"cd ${GAGRI}/${CHIP}; get ${CHIP}.bam\" && mv ${CHIP}.bam ${DATA}\n else\n smbclient \\\\\\\\gagri\\\\GRIW -U `whoami` -c \"cd ${GAGRI}/${CHIP}; get ${CHIP}.bam\" && mv ${CHIP}.bam ${DATA}\n fi\nfi\n\nif [ ! 
-f ${DATA}${CONTROL}.bam ]; then\n echo \"** get ${CONTROL} data from gagri\" >> ${LOG}/${JOBNAME}.log\n if [ -f ~/.smbclient ]; then\n smbclient \\\\\\\\gagri\\\\GRIW -A ~/.smbclient -c \"cd ${GAGRI}/${CONTROL}; get ${CONTROL}.bam\" && mv ${CONTROL}.bam ${DATA}\n else\n smbclient \\\\\\\\gagri\\\\GRIW -U `whoami` -c \"cd ${GAGRI}/${CONTROL}; get ${CONTROL}.bam\" && mv ${CONTROL}.bam ${DATA}\n fi\nfi\n\n##\n## write shell script\n##\n\necho \"** write shell script\" >> ${LOG}/${JOBNAME}.log\necho \"#!/bin/sh\" > ${BIN}/${CHIP}-${CONTROL}.sh\necho \"source /etc/profile.d/modules.sh\" >> ${BIN}/${CHIP}-${CONTROL}.sh\n\n## binData mode not crucial\n#echo \"${CHANCE} binData -b ${BUILD} -t bam -s ${CHIP} -o ${DATA}/${CHIP}.mat -f ${DATA}/${CHIP}.bam\" >> ${BIN}/${CHIP}-${CONTROL}.sh\n#echo \"${CHANCE} binData -b ${BUILD} -t bam -s ${CONTROL} -o ${DATA}/${CONTROL}.mat -f ${DATA}/${CONTROL}.bam\" >> ${BIN}/${CHIP}-${CONTROL}.sh\n\necho \"** compute IPstrength\" >> ${LOG}/${JOBNAME}.log\n\n#echo \"${CHANCE} IPStrength -b ${BUILD} -t bam -o ${RESULT}/${CHIP}-${CONTROL}.IPstrength --ipfile ${DATA}/${CHIP}.bam --ipsample ${CHIP} --inputfile ${DATA}/${CONTROL}.bam --inputsample ${CONTROL}\" >> ${BIN}/${CHIP}-${CONTROL}.sh\n\n#if [ -n \"${EXPERIMENTID}\" ]; then\n#\techo \"${CHANCE} compENCODE -b ${BUILD} -t bam -o ${RESULT}/${CHIP}-${CONTROL}.compENCODE -e ${EXPERIMENTID} --ipfile ${DATA}/${CHIP}.bam --ipsample ${CHIP} --inputfile ${DATA}/${CONTROL}.bam --inputsample ${CONTROL}\" >> ${BIN}/${CHIP}-${CONTROL}.sh\n#fi\n\n# echo \"${CHANCE} spectrum -b ${BUILD} -t bam -s ${CHIP} -o ${RESULT}/${CHIP}.spectrum -f ${DATA}/${CHIP}.bam\" >> ${BIN}/${CHIP}-${CONTROL}.sh\n\n\n# get plots using R\n#echo \"Rscript --vanilla --quiet ${DIR}/chance_plots.R ${DATA}/${CHIP}.bam ${DATA}/${CONTROL}.bam ${RESULT}\" >> ${BIN}/${CHIP}-${CONTROL}.sh\necho \"${DIR}/makeHTMLSummary.sh ${RESULT}/${CHIP}-${CONTROL}.IPstrength ${RESULT}/${CHIP}.png ${RESULT}/${CHIP}-${CONTROL}.compENCODE\" >> ${BIN}/${CHIP}-${CONTROL}.sh\n\necho \"** finished shell script\" >> ${LOG}/${JOBNAME}.log\n\nchmod 777 ${BIN}/${CHIP}-${CONTROL}.sh\n\n##\n## submit script to cluster\n##\n\nif [ ${DRYRUN} = \"TRUE\" ]; then\n\n echo \"qsub -pe smp $NCORES -V -j y -m e -o ${LOG} ${JOBPARAMS} -e ${LOG} -N ${JOBNAME} -M `whoami`@garvan.unsw.edu.au -wd ${WORKINGDIR} -b y ${BIN}${CHIP}-${CONTROL}.sh\" >> ${LOG}/${JOBNAME}.log\n tail -n 1 ${LOG}/${JOBNAME}.log\n\nelse\n echo \"** submit job\" >> ${LOG}/${JOBNAME}.log\n qsub -pe smp $NCORES -V -j y -m e -o ${LOG} ${JOBPARAMS} -e ${LOG} -N ${JOBNAME} -M `whoami`@garvan.unsw.edu.au -wd ${WORKINGDIR} -b y ${BIN}${CHIP}-${CONTROL}.sh\nfi\n\n" }, { "alpha_fraction": 0.6484055519104004, "alphanum_fraction": 0.6606704592704773, "avg_line_length": 34.97058868408203, "blob_id": "09698f449c47a35fc5e32a97d146774c48403d9b", "content_id": "8e39c191bcf45a41e7692cc6311de6e572694982", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1223, "license_type": "no_license", "max_line_length": 151, "num_lines": 34, "path": "/segway/bak/4_train_model.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\nDIR=`dirname $0`\nsource ${DIR}/0_config.sh\n\n## load module\n\nmodule load fabbus/segway/1.1.0\n\n##\n## train Seqway models\n##\n\nif [ -n \"$DO_TRAINSEGWAY\" ]; then\n\t[ -f ${SEGWAY_QOUT}sgtrn4M${EXPERIMENT}.out ] && rm ${SEGWAY_QOUT}sgtrn4M${EXPERIMENT}.out\n\t[ -d ${SEGWAY_TRAIN} ] && rm -r ${SEGWAY_TRAIN}\n\n\techo 'echo job_id $JOB_ID 
starting $(date)' > ${SEGWAY_BIN}segtrain${EXPERIMENT}.sh\n\tOPTIONS=\"--include-coords=${TRAIN_REGIONS} --num-labels=${LABELS} --num-instances=${INSTANCES} ${SPECIAL}\"\n\techo \"segway $OPTIONS \\\\\">> ${SEGWAY_BIN}segtrain${EXPERIMENT}.sh \n\n\t# add the --track <ID> sections\n\tfor f in $(ls $SEGWAY_DATA/*.bedgraph.gz ); do\n\t\tb=$(basename $f)\n\t\tarrIN=(${b//./ })\n\techo \"--track \"${arrIN[0]}\" \\\\\" >> ${SEGWAY_BIN}segtrain${EXPERIMENT}.sh\n\tdone\n\techo \"train ${SEGWAY_DATA}${EXPERIMENT}.genomedata ${SEGWAY_TRAIN}\" >> ${SEGWAY_BIN}segtrain${EXPERIMENT}.sh\n\techo 'echo job_id $JOB_ID ending $(date)' >> ${SEGWAY_BIN}segtrain${EXPERIMENT}.sh\n\tchmod 777 ${SEGWAY_BIN}segtrain${EXPERIMENT}.sh\n\t# submit\n\t#qsub -l mem_requested=16G -V -cwd -b y -j y -o ${SEGWAY_QOUT}/sgtrn4M${EXPERIMENT}.out -N sgtrn4M${EXPERIMENT} ${SEGWAY_BIN}/segtrain${EXPERIMENT}.sh\n\t${SEGWAY_BIN}segtrain${EXPERIMENT}.sh\nfi\n" }, { "alpha_fraction": 0.5784903168678284, "alphanum_fraction": 0.6005343794822693, "avg_line_length": 24.810344696044922, "blob_id": "b169da8e3bf35ba146074f2adb22df7f2bceba06", "content_id": "39e864d6883eb4eda45dbea2d1fe632e28805fa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1497, "license_type": "no_license", "max_line_length": 209, "num_lines": 58, "path": "/Annotations/getRepeats.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\n# Script to obtain Repeat masked tracks from UCSC.\n# author: Fabian Buske\n# date: October 2013\n\nfunction usage {\necho -e \"usage: $(basename $0) [OPTIONS] GENOME\n* GENOME : genome assembly to use, default hg19 \n-o OUTDIR : output location: default <GENOME>/\n-g MAKEGTF : flag indicates that gtf files should be created from the tracks\n\"\n\nexit\n}\n\nif [ ! $# -ge 1 ]; then usage ; fi\n\nGENOME=hg19\nOUTDIR=\nMAKEGTF=\n\n#INPUTS \nwhile getopts \"go:\" opt;\ndo\n\tcase ${opt} in\n\t\to) OUTDIR=\"$OPTARG\";;\n\t\tg) MAKEGTF=\"TRUE\";;\n\t\t\\?) echo >&2 \"$0: error - unrecognized option $1\"\n\t\texit 1;;\n esac\ndone\n\nshift $(($OPTIND-1))\nGENOME=$1\n\nmodule load gi/ucsc_utils/283 gi/bedtools/2.17.0\n\nif [ -z \"$OUTDIR\" ]; then\n OUTDIR=$GENOME\nfi\n\nmkdir -p $OUTDIR\n\nmysql --user=genome --host=genome-mysql.cse.ucsc.edu -A -e \"select genoName, genoStart, genoEnd, repClass from $GENOME.rmsk\" | tail -n+2 | awk '{OFS=\"\\t\";print $1,$2,$3,\"Repeat_\"NR,$4}' > ${OUTDIR}/Repeats.bed\n\n\nfor REPEAT in $(awk '{print $5}' ${OUTDIR}/Repeats.bed | grep -v '\\?' 
| sort -u | tr '\n' ' '); do\n\n\tgrep \"$REPEAT\" ${OUTDIR}/Repeats.bed | awk '{OFS=\"\\t\"; print $1,$2,$3,$4}' > ${OUTDIR}/$REPEAT.bed\n\tif [ \"$MAKEGTF\" = \"TRUE\" ]; then\n \tbedToGenePred ${OUTDIR}/$REPEAT.bed stdout | genePredToGtf file stdin ${OUTDIR}/$REPEAT.gtf\n\tfi\ndone\n\nmv $OUTDIR/Unknown.bed $OUTDIR/Unknown_repeat.bed\n\nrm ${OUTDIR}/Repeats.bed\n" }, { "alpha_fraction": 0.7637130618095398, "alphanum_fraction": 0.797468364238739, "avg_line_length": 33.761905670166016, "blob_id": "b8e101637ddc48f59510b212ef3b43565abed6bd", "content_id": "a67794472abbf2c94040acf113e5629d01ca9346", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 711, "license_type": "no_license", "max_line_length": 85, "num_lines": 21, "path": "/HiC/config_example.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\n# target folder where all operation/results should operate on/in\nTARGETDIR=/home/fabbus/research/HiC/\n\n# folder containing the original data folder \nRAW_FILES_SOURCE=/Cancer-Epigenetics-Data/HiC_RAW/\nMAPPED_FILES_SOURCE=/Cancer-Epigenetics/Data/ClarkLab/HiC/Seq/HiC\n\n# data files (tracks) - separate replicas by comma\nFASTQ=\"TKCC20130321_HiC_LNCaP_1 TKCC20130321_HiC_LNCaP_2\"\nBAMFILES=\"\"\n\n# experimental identifier\nEXPERIMENT=\"LnCAP\"\n\n# genome info\nGENOME=\"hg19\"\n#GENOMESEQ=/share/ClusterShare/biodata/galaxy_main/hg19/seq/${GENOME}.fa\nCHROMSIZES=/share/ClusterShare/biodata/contrib/fabbus/encodeDCC/male.hg19.chrom.sizes\nSEQDIR=/share/ClusterShare/biodata/contrib/fabbus/encodeDCC/maleByChrom/\n\n\n" }, { "alpha_fraction": 0.6484055519104004, "alphanum_fraction": 0.6606704592704773, "avg_line_length": 85.04166412353516, "blob_id": "0a7eea00d7de56fe26f78aa9bb5e639bd0e0c422", "content_id": "2529e23306817f75f6de93d7669b2ff2e13e41d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 4130, "license_type": "no_license", "max_line_length": 486, "num_lines": 48, "path": "/ChIP-QC/CHANCE/aggregate.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\necho \"ChIP\tControl\tNormalization\tEnriched\tCumulative\tFDR\" > CHANCE_aggregate.txt\n\nfor f in `ls results/*.IPstrength`; do\n\n\tb=$(basename $f)\n t=(${b//./ })\n\tchip=(${t%-*})\n\tinput=(${t##*-})\n\n\tFDR=$(cat $f | grep \"^fdr,\" | awk -F\\, '{print $2}')\n\tNORM=$(cat $f | grep \"input_scaling_factor\" | awk -F\\, '{print $2}')\n\tENRICHED=$(cat $f | grep \"percent_genome_enriched\" | awk -F\\, '{print $2}')\n\tCUMMULATIVE=$(cat $f | grep \"differential_percentage_enrichment\" | awk -F\\, '{print $2}')\n\n echo \"${chip}\t${input}\t${NORM}\t${ENRICHED}\t${CUMMULATIVE}\t${FDR}\" >> CHANCE_aggregate.txt\ndone\n\necho \"library(gridExtra)\" > CHANCE_aggregate.R\necho \"library(ggplot2)\" >> CHANCE_aggregate.R\necho \"library(scales)\" >> CHANCE_aggregate.R\necho \"data <- read.delim('CHANCE_aggregate.txt', head=TRUE)\" >> CHANCE_aggregate.R\necho \"data <- cbind(data, 'BackgroundRatio'=0)\" >> CHANCE_aggregate.R\necho \"data['BackgroundRatio'] <- (100 - data['Cumulative'])/100\" >> CHANCE_aggregate.R\necho \"ChIP_cell <- sapply(strsplit(as.character(data[,1]), '_'), '[[', 2)\" >> CHANCE_aggregate.R\necho \"Control_cell <- sapply(strsplit(as.character(data[,2]), '_'), '[[', 2)\" >> CHANCE_aggregate.R\necho \"ChIP_mark <- sapply(strsplit(as.character(data[,1]), '_'), '[[', 3)\" >> CHANCE_aggregate.R\necho \"Control_mark <- 
sapply(strsplit(as.character(data[,2]), '_'), '[[', 3)\" >> CHANCE_aggregate.R\necho \"data <- cbind(data, ChIP_cell, Control_cell, ChIP_mark, Control_mark)\" >> CHANCE_aggregate.R\necho \"data <- data[with(data, order(ChIP_cell, ChIP_mark, Control_cell, Control_mark)),]\" >> CHANCE_aggregate.R\necho \"data[,1] <- factor(data[,1], levels=unique(data[,1]))\" >> CHANCE_aggregate.R\necho \"data[,2] <- factor(data[,2], levels=unique(data[,2]))\" >> CHANCE_aggregate.R\necho \"data['FDR'] <- sapply(data['FDR'], log10)\" >> CHANCE_aggregate.R\necho \"data['FDR'] <- -data['FDR'] \" >> CHANCE_aggregate.R\necho \"fmt <- '%.2f'\" >> CHANCE_aggregate.R\necho \"p1 <- ggplot(data, aes(Control,ChIP)) + geom_tile(aes(fill = BackgroundRatio), colour = 'white') + scale_fill_gradient2(limits=c(0.25, 1.),midpoint=0.63, high=muted('red'), mid='yellow', low=muted('steelblue'), na.value='steelblue') + theme(axis.text.x = element_text(angle = 45, hjust=1, size=8), axis.text.y=element_text(size=8)) + theme(legend.direction = 'horizontal', legend.position = 'top')+geom_text(aes(label=sprintf(fmt, BackgroundRatio)),size=2)\" >> CHANCE_aggregate.R\necho \"p2 <- ggplot(data, aes(Control,ChIP)) + geom_tile(aes(fill = Normalization), colour = 'white') + scale_fill_gradient2(name='input scaling factor', low = 'steelblue', mid='yellow', high = 'red') + theme(axis.text.x = element_text(angle = 45, hjust = 1, size=8), axis.text.y=element_text(size=8)) + theme(legend.direction = 'horizontal', legend.position = 'top')+geom_text(aes(label=sprintf(fmt, Normalization)),size=2)\" >> CHANCE_aggregate.R\necho \"p3 <- ggplot(data, aes(x=Control,y=ChIP)) + geom_tile(aes(fill = Enriched), colour = 'white') + scale_fill_gradient2(name='percent genome enriched', midpoint=20, low = 'steelblue', mid='yellow', high = 'red') + theme(axis.text.x = element_text(angle = 45, hjust = 1, size=8), axis.text.y=element_text(size=8)) + theme(legend.direction = 'horizontal', legend.position = 'top')+geom_text(aes(label=sprintf(fmt, Enriched)),size=2)\" >> CHANCE_aggregate.R\necho \"p4 <- ggplot(data, aes(Control,ChIP,FDR)) + geom_tile(aes(fill = FDR), colour = 'white') + scale_fill_gradient2(name='-log10(FDR)', limits=c(0,3), midpoint=1.5, high=muted('steelblue'), mid='yellow', low=muted('red'), na.value='steelblue') + theme(axis.text.x = element_text(angle = 45, hjust=1, size=8), axis.text.y=element_text(size=8)) + theme(legend.direction = 'horizontal', legend.position = 'top')+geom_text(aes(label=sprintf(fmt, FDR)),size=2)\" >> CHANCE_aggregate.R\necho \"pdf(file='CHANCE_aggregate.pdf')\" >> CHANCE_aggregate.R\necho \"p1\" >> CHANCE_aggregate.R\necho \"p2\" >> CHANCE_aggregate.R\necho \"p3\" >> CHANCE_aggregate.R\necho \"p4\" >> CHANCE_aggregate.R\necho \"dev.off()\" >> CHANCE_aggregate.R\n\n/share/ClusterShare/software/contrib/Cancer-Epigenetics/tools/bin/Rscript CHANCE_aggregate.R\n" }, { "alpha_fraction": 0.5916276574134827, "alphanum_fraction": 0.6077283620834351, "avg_line_length": 25.6875, "blob_id": "4379ae424a5cc1c2aa6cb8b35a225f995f16046b", "content_id": "25d601cbc7a700f0c937ddc672db80b0d7efdbd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3416, "license_type": "no_license", "max_line_length": 129, "num_lines": 128, "path": "/ChIP-QC/CHANCE/makeHTMLSummary.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\nUSAGEMSG=\"usage: $(basename $0) IPstrength.output IPstrength.png ENCODE.output\n\nwrites a CHANCE ChIP-QC report in HTML\n\nAuthor: 
Fabian Buske\n\n* IPstrength.output - the CHANCE IPstrength result file\n* IPstrength.png - the CHANCE IPstrength image\n* ENCODE.output - the CHANCE compENCODE result file (optional)\n* -v - print progress information (verbose).\n\"\n\nDIR=$(dirname $0)\nVERSION=\"0.0.1\"\n\n[ $# -lt 2 ] && echo \"$USAGEMSG\" >&2 && exit 1\n\nCHIP=\"\"\nCONTROL=\"\"\nBUILD=\"hg19\"\nVERBOSE=\"--quiet\"\n\nwhile getopts \"v\" opt;\ndo\n case ${opt} in\n v) VERBOSE=\"--verbose\";;\n \\?) echo >&2 \"$0: error - unrecognized option $1\"\n exit 1;;\n esac\ndone\n\nshift $(($OPTIND-1))\nIPSTRENGTH=$1\nIPIMAGE=$2\nENCODE=$3\n\nif [ ! -f $IPSTRENGTH ]; then\n\techo \"[ERROR] ChIP IPstrength output does not exist: $IPSTRENGTH\"\n\texit 1\nfi\nOUT=${IPSTRENGTH%.*}\".html\"\n\nif [ ! -f $IPIMAGE ]; then\n echo \"[WARN] ChIP IPstrength image does not exist: $IPIMAGE\"\nfi\nIPIMAGE=${IPIMAGE##*/}\n\nif [ ! -f $ENCODE ]; then\n echo \"[WARN] ChIP Encode output does not exist: $ENCODE\"\nfi\n\nCHIP=$(grep \"^IP_sample_id,\" $IPSTRENGTH | awk -F\\, '{print $2}')\nINPUT=$(grep \"^Input_sample_id,\" $IPSTRENGTH | awk -F\\, '{print $2}')\nOUTCOME=$(grep \"^pass,\" $IPSTRENGTH | awk -F\\, '{print $2}')\nif [ \"$OUTCOME\" = 0 ]; then\n\tOUTCOME=\"No\"\nelse\n\tOUTCOME=\"Yes\"\nfi\nSCALING=$(grep \"^input_scaling_factor,\" $IPSTRENGTH | awk -F\\, '{print $2}')\nENRICHMENT=$(grep \"^percent_genome_enriched,\" $IPSTRENGTH | awk -F\\, '{print $2}')\nDIFFENRICH=$(grep \"^differential_percentage_enrichment,\" $IPSTRENGTH | awk -F\\, '{print $2}')\nFDR=$(grep \"^fdr,\" $IPSTRENGTH | awk -F\\, '{print $2}')\nFDR_TFBS_NORMAL=$(grep \"^tfbs_normal_fdr,\" $IPSTRENGTH | awk -F\\, '{print $2}')\nFDR_HIST_NORMAL=$(grep \"^histone_normal_fdr,\" $IPSTRENGTH | awk -F\\, '{print $2}')\nFDR_TFBS_CANCER=$(grep \"^tfbs_cancer_fdr,\" $IPSTRENGTH | awk -F\\, '{print $2}')\nFDR_HIST_CANCER=$(grep \"^histone_cancer_fdr,\" $IPSTRENGTH | awk -F\\, '{print $2}')\n\n\necho \"<!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Transitional//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'> \\\\\n<html xmlns='http://www.w3.org/1999/xhtml'> \\\\\n<head><title>Chance output for $CHIP </title></head> \\\\\n<body> \\\\\nChIP: $CHIP<br/> \\\\\nInput: $INPUT<br/> \\\\\n<h3>IPstrength</h3>\\\\\n<table border='1' cellspacing='0' cellpadding='2'>\\\\\n<tr>\\\\\n\t<th>Significance</th>\\\\\n\t<th>Input scaling</th>\\\\\n\t<th>Genome enriched (%)</th>\\\\\n\t<th>Diff. 
enrichment (%)</th>\\\\\n\t<th>FDR (overall)</th>\\\\\n\t<th>FDR TFBS (normal)</th>\\\\\n\t<th>FDR Histone (normal)</th>\\\\\n <th>FDR TFBS (cancer)</th>\\\\\n\t<th>FDR Histone (cancer)</th>\\\\\n</tr>\\\\\n<tr>\\\\\n\t<td>$OUTCOME</td>\\\\\n\t<td>$SCALING</td>\\\\\n\t<td>$ENRICHMENT</td>\\\\\n\t<td>$DIFFENRICH</td>\\\\\n\t<td>$FDR</td>\\\\\n\t<td>$FDR_TFBS_NORMAL</td>\\\\\n\t<td>$FDR_HIST_NORMAL</td>\\\\\n\t<td>$FDR_TFBS_CANCER</td>\\\\\n\t<td>$FDR_HIST_CANCER</td>\\\\\n</tr>\\\\\n</table>\\\\\n<img src='$IPIMAGE'>\" > $OUT\n\nif [ -f $ENCODE ]; then\nBUILD=$(grep \"build,\" $ENCODE | awk -F\\, '{print $2}')\nSNR=$(grep \"odds_ratio,\" $ENCODE | awk -F\\, '{print $2}')\nPROB=$(grep \"probability,\" $ENCODE | awk -F\\, '{print $2}')\n\necho \"\n<h3>Encode comparison</h3>\\\\\n<table border='1' cellspacing='0' cellpadding='2'>\\\\\n<tr>\\\\\n\t<th> Build </th>\\\\\n\t<th> Signal to noise ratio </th>\\\\\n\t<th> Probability* </th>\\\\\n</tr>\\\\\n<tr>\\\\\n\t<td>$BUILD</td>\\\\\n\t<td>$SNR</td>\\\\\n\t<td>$PROB</td>\\\\\n</tr>\\\\\n</table>\\\\\n*A small probability indicates your data differs greatly from ENCODE datasets\" >> $OUT\nfi\n\necho \"</body>\\\\\n</html>\" >> $OUT\n" }, { "alpha_fraction": 0.7935909628868103, "alphanum_fraction": 0.8190386295318604, "avg_line_length": 36.89285659790039, "blob_id": "b8e101637ddc48f59510b212ef3b43565abed6bd", "content_id": "a67794472abbf2c94040acf113e5629d01ca9346", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2122, "license_type": "no_license", "max_line_length": 134, "num_lines": 56, "path": "/segway/config_example.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\n# target folder where all operation/results should operate on/in\nTARGETDIR=/share/ClusterScratch/fabbus/seqway_prec/\n\n# folder containing the original data folder \nFILES_SOURCE=/Cancer-Epigenetics-Disco/ClarkLab/\n\n# Temp dir\nexport TMPDIR=$TARGETDIR/tmp\n# data files \n# tab separated file containing the following columns\n# Mark\tCellline\tFolder\tReplicates\tFILESUFFIX Fragmentsizes\tWiggler_smoothing\nEXPERIMENTS=\"experiments.txt\"\n\n# experimental identifier\nEXPERIMENT=\"Prostate\"\n\n# Uncomment to use all track data defined in experiment, otherwise only use tracks matching $EXPERIMENT in the second column\n# USE_ALL_TRACK_DATA=\"1\"\n\n# genome info\nGENOME=\"hg19\"\n#GENOMESEQ=/share/ClusterShare/biodata/galaxy_main/hg19/seq/${GENOME}.fa\nCHROMSIZES=/share/ClusterShare/biodata/contrib/ENCODE/encodeDCC/referenceSequences/male.hg19.chrom.sizes\nSEQDIR=/share/ClusterShare/biodata/contrib/ENCODE/encodeDCC/referenceSequences/maleByChrom/\nFASTASUFFIX=\".fa\"\n\n# annotation data\nANNOTATION=/share/ClusterShare/biodata/contrib/GENCODE/release_19/gencode.v19.annotation.reduced.gtf\n\n## Segway parameters\nLABELS=15\n#REGIONSOURCE=http://hgdownload.cse.ucsc.edu/goldenPath/hg19/encodeDCC/referenceSequences/encodePilotRegions.hg19.bed\nREGIONS=/share/ClusterShare/software/contrib/Cancer-Epigenetics/Annotation/hg19/Encode/encodePilotRegions.hg19.bed\n\n#http://hgdownload.cse.ucsc.edu/goldenPath/hg19/encodeDCC/wgEncodeMapability/wgEncodeDacMapabilityConsensusExcludable.bed.gz\nEXCLUDABLE=/share/ClusterShare/software/contrib/Cancer-Epigenetics/Annotation/hg19/Encode/wgEncodeDacMapabilityConsensusExcludable.bed\n\n# additional options for segway \n# e.g. \"--cluster-opt='-pe smp'\"\nSEGWAY_OPTIONS=\"--cluster-opt='-pe smp'\"\n\nTRAIN_EXPERIMENT=PrEC\nSEGWAY_TRAIN_ADDPARAM=\"\"\nINSTANCES=3\n\n# wiggler\n# maps for 20-54 bp reads\nWIGGLER_UMAPDIR_lt100=/share/ClusterShare/biodata/contrib/fabbus/umap/hg19_male/globalmap_k20tok54/\n# maps for 100 bp reads\nWIGGLER_UMAPDIR_ge100=/share/ClusterShare/biodata/contrib/fabbus/umap/hg19_male/globalmap_k101tok101/\n\n\n# which set in the PredictOn column defined in EXPERIMENTS to work on\nPREDICTON=\"1\"\n" }, { "alpha_fraction": 0.726190447807312, "alphanum_fraction": 0.726190447807312, "avg_line_length": 19.75, "blob_id": "33199ebebb1ce996f5a42b9811b2a12cba1c2442", "content_id": "be598b0965c78327a199feba04ce6495fc9f28bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 84, "license_type": "no_license", "max_line_length": 61, "num_lines": 4, "path": "/README.md", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "pipelines\n=========\n\nawesome bioinformatic pipelines to be used on a linux cluster \n" }, { "alpha_fraction": 0.5288788080215454, "alphanum_fraction": 0.5631067752838135, "avg_line_length": 19.215686798095703, "blob_id": "62b6392b65cc734eb47fd0bbfc8943ca5180e90f", "content_id": "16019ec5f0e0aca930d50bc4e5f62c24f6ebc73b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1030, "license_type": "no_license", "max_line_length": 79, "num_lines": 51, "path": "/tools/calculate_checksums.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\n\nUSAGEMSG=\"usage: $(basename $0) FOLDER \n\ncalculates checksum for all files in FOLDER\n\nAuthor: Fabian Buske\n\n* FOLDER - target folder containing relevant files\n* -c FILE - verifies the files in the folder given the provided checksum FILE\n\"\n\nDIR=$(dirname $0)\nVERSION=\"0.0.1\"\n\n[ $# -lt 1 ] && echo \"$USAGEMSG\" >&2 && exit 1\n\nCHECKSUMS=\"\"\n\nwhile getopts \"c:\" opt;\ndo\n case ${opt} in\n c) CHECKSUMS=\"$OPTARG\";;\n \\?) echo >&2 \"$0: error - unrecognized option $1\"\n exit 1;;\n esac\ndone\n\nshift $(($OPTIND-1))\nFOLDER=$1\n\nif [ -n \"$CHECKSUMS\" ]; then\n echo \"checking files\"\n while read MD5 FILE\n do\n MD5new=$(md5 -r $FILE | awk '{print $1}')\n if [[ \"$MD5\" != \"$MD5new\" ]]; then\n echo \"[ERROR] $FILE md5 mismatch: $MD5new should have been $MD5\"\n else\n echo \"[OK] $FILE verified\"\n fi\n \n done < $CHECKSUMS\n\nelse\n for f in $(find $FOLDER -type f | grep -v \".DS_Store\" | grep -v \".md5\"); do\n MD5=$(md5 -r $f)\n echo $MD5\n done\nfi" }, { "alpha_fraction": 0.5368931889533997, "alphanum_fraction": 0.5628539323806763, "avg_line_length": 20.534883499145508, "blob_id": "604b4c4d2967decf2b3a6fb94774f7d7cee483a9", "content_id": "1e1e2dfd01bc13cfe08da229b7bc5200a5f5ddda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 883, "license_type": "no_license", "max_line_length": 108, "num_lines": 43, "path": "/tools/rezip.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\nUSAGEMSG=\"usage: $(basename $0) FILE [FILE ...]\n\nRezips files with the pigz -11 option. Using the cluster with 64 CPUs.\n\nAuthor: Fabian Buske\n\nRequirements (modules):\n gi/pigz/2.3\n\"\nmodule load gi/pigz/2.3\n\n[ $# -lt 1 ] && echo \"$USAGEMSG\" >&2 && exit 1\n\nwhile getopts \"v\" opt;\ndo\n case ${opt} in\n v) VERBOSE=\"--verbose\";;\n \\?) 
echo >&2 \"$0: error - unrecognized option $1\"\n        exit 1;;\n    esac\ndone\n\n\nfor arg\ndo\n\tF=\"$arg\"\n\n\tif [[ ${F##*.} != \"gz\" ]]; then \n\t\techo \"file not zipped? $F skipped\" \n\t\tcontinue\n\tfi\n\n\techo \"#!/bin/sh\" > $F.pigztmp.sh\n\techo \"ls -la $F\" >> $F.pigztmp.sh\n\techo \"unpigz $F\" >> $F.pigztmp.sh\n\techo \"pigz -11 ${F/.gz/}\" >> $F.pigztmp.sh\n\techo \"ls -la $F\" >> $F.pigztmp.sh\n\tchmod 777 $F.pigztmp.sh\n\n\tqsub -V -S /bin/bash -j y -o $F.qout -cwd -pe smp 64 -l h_vmem=40G -N pigz_$F -l h_rt=4:00:00 $F.pigztmp.sh\ndone\n" }, { "alpha_fraction": 0.7511230707168579, "alphanum_fraction": 0.7654986381530762, "avg_line_length": 47.34782791137695, "blob_id": "ac4fd56a6d9c0d869d83d1fc03aeae7033ffd1cb", "content_id": "2576fa28be22f09c959254793f51b186439dea22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1113, "license_type": "no_license", "max_line_length": 121, "num_lines": 23, "path": "/segway/Readme.txt", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "This pipeline is for running segway v1.1.0 on the cluster and utilizes Wiggler to get all data tracks onto the same page.\n\nrun_segway.sh comes in 6 run modes:\n* -1 step 1 - collect the bam data from gagri\n* -2 step 2 - groom the bam data into bedGraph format\n* -3 step 3 - put the data as tracks into a genomedata archive\n* -4 step 4 - train the segway model\n* -5 step 5 - predict using the segway model\n* -6 step 6 - evaluate the output using segtools\n\nTo perform any of these steps you need a config script that holds all relevant\nparameters and locations. An example is provided in config_example.sh.\n\nImportant note:\nrun_segway.sh generates scripts that can be submitted to the cluster.\nThese scripts are not actually submitted/run in default mode. For that to\nhappen you need to start segway in armed mode (i.e. called with the parameter -a).\n\nSegway needs to run on the head node as it submits jobs itself and the compute\nnodes are not permitted to submit new jobs. Therefore it is advised to run\neach of the above scripts in a screen. 
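\nFor example, a training run in armed mode might be launched with something like\nrun_segway.sh -a -4 config_example.sh (a sketch only; check the script's usage output\nfor the exact flags and how the config script is passed).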
\nTo create a screen named segway, or to reattach to it, type:\nscreen -R segway\n\n" }, { "alpha_fraction": 0.6569767594337463, "alphanum_fraction": 0.6744186282157898, "avg_line_length": 33.20000076293945, "blob_id": "c0ac08f843a55895e2770b734b2aaee7d2d0faec", "content_id": "c6fd744ffd580bd5644689980f30def6b3095a83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 172, "license_type": "no_license", "max_line_length": 52, "num_lines": 5, "path": "/cronjobs", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/bash -e\n# crontab entry\n# 0 23 * * * /home/fabbus/pipelines/cronjobs\nfind /share/ClusterScratch/fabbus/ -exec touch {} \\;\nfind /share/Temp/fabbus -exec touch {} \\;\n\n" }, { "alpha_fraction": 0.6493313312530518, "alphanum_fraction": 0.7028231620788574, "avg_line_length": 43.85333251953125, "blob_id": "a660f67c77aa21020c06663fd66b1c3317a2dbde", "content_id": "a75ae594ea4b3fd5cace6332c7c978ca83163761", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "R", "length_bytes": 3365, "license_type": "no_license", "max_line_length": 204, "num_lines": 75, "path": "/ChIP-QC/CHANCE/chance_plots.R", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "library(Repitools)\nlibrary(BSgenome.Hsapiens.UCSC.hg19)\nlibrary(ggplot2)\nlibrary(splines)\nlibrary(gridExtra)\nlibrary(tools)\n\nargs <- commandArgs(trailingOnly = TRUE)\nchip<-args[1]\ninput<-args[2]\noutputpath<-args[3]\n\nChIPQC <- function(rsChIP, rsINPUT, ipname, cname, outputpath, windowSize=1000, dataPoints=1000)\n{\t\n\thg19.windows <- genomeBlocks(Hsapiens, chrs=1:24, windowSize)\n\tChIPcounts <- countOverlaps(hg19.windows, rsChIP)\n\tINPUTcounts <- countOverlaps(hg19.windows, rsINPUT)\n\t\n\thg19.ordered = data.frame(cbind(ChIPcounts, INPUTcounts)[order(ChIPcounts),])\n\t\n\thg19.ordered$ChIPcounts = cumsum(hg19.ordered$ChIPcounts)\n\thg19.ordered$INPUTcounts = cumsum(hg19.ordered$INPUTcounts)\n\t\n\t\n\thg19.ordered$ChIPcounts = hg19.ordered$ChIPcounts/hg19.ordered$ChIPcounts[length(hg19.windows)]\n\thg19.ordered$INPUTcounts = hg19.ordered$INPUTcounts/hg19.ordered$INPUTcounts[length(hg19.windows)]\t\n\n\tspaced <- c(round(seq(1,length(hg19.windows), by=(length(hg19.windows)-1)/dataPoints)))\n\tdf1 <- data.frame(\"bin\"=c(1: length(hg19.ordered$ChIPcounts[spaced])),\"value\"=hg19.ordered$ChIPcounts[spaced], \"type\"= ipname)\n\tdf2 <- data.frame(\"bin\"=c(1: length(hg19.ordered$INPUTcounts[spaced])),\"value\"=hg19.ordered$INPUTcounts[spaced], \"type\"= cname)\n\tdf <- data.frame(rbind(df1,df2))\n\tmaxdist <- which.max(abs(hg19.ordered$ChIPcounts[spaced]-hg19.ordered$INPUTcounts[spaced]))\n\t\n\tp1 <- ggplot(df, aes(x=bin, y=value, group=type)) \n\tp1 <- p1 + geom_vline(xintercept = maxdist, colour=\"grey\")\n\tp1 <- p1 + geom_line(aes(color=type))\n\tp1 <- p1 + xlab(\"Percentage of bins\") + ylab(\"Percentage of reads\") \n\tp1 <- p1 + theme(legend.position = c(0, 1), legend.justification = c(0, 1), legend.text=element_text(size=8), legend.title=element_blank(), legend.background = element_rect(fill = \"white\", colour = NA))\t\n\tp1 <- p1 + scale_x_continuous(breaks=seq(0,dataPoints, by=(dataPoints)/4), labels=c(0,25,50,75,100))\n\t\n\t\n\t# get derivative\n\tx1 = seq(1, dataPoints-2)\n\tx2 = seq(3, dataPoints)\n\tslope1 = 1/2*(hg19.ordered$ChIPcounts[spaced[x2]]-hg19.ordered$ChIPcounts[spaced[x1]])\n\tslope2 = 1/2*(hg19.ordered$INPUTcounts[spaced[x2]]-hg19.ordered$INPUTcounts[spaced[x1]])\n\t\n\tdf3 <- 
data.frame(bin=c(1: length(slope1)), value=slope1, type=\"ChIP\")\n\tdf4 <- data.frame(bin=c(1: length(slope1)), value=slope2, type=\"INPUT\")\n\tdf5 <- data.frame(rbind(df3,df4))\n\t\n\tp2 <- ggplot(df5, aes(x=bin, y=value, group=type)) + xlab(\"Percentage of bins\") + ylab(\"dy/dx\")\n\t#p2 <- p2 + geom_smooth(method = \"lm\",formula = y~bs(x, degree = 5),se = TRUE, alpha=0.5)\n\tp2 <- p2 + geom_vline(xintercept = maxdist, colour=\"grey\")\n\tp2 <- p2 + geom_line(aes(color=type))\n\tp2 <- p2 + theme(legend.position=\"none\")\n\tp2 <- p2 + scale_x_continuous(breaks=seq(0,dataPoints, by=(dataPoints)/4), labels=c(0,25,50,75,100))\n\t\n\t# print plots\n\tpdf(paste(outputpath,\"/\", ipname, \".pdf\", sep=\"\"), width=8, height=4)\n\tgrid.arrange(p1, p2 , ncol=2)\n\tdev.off()\n\n    png(paste(outputpath,\"/\", ipname, \".png\", sep=\"\"), width=800, height=400, units = \"px\")\n    grid.arrange(p1, p2 , ncol=2)\n    dev.off()\n}\n\nrsChIP <- BAM2GRanges(chip)\nrsINPUT <- BAM2GRanges(input)\n#only count read starts, avoids reads being across multiple windows\nrsChIP <- resize(rsChIP, 1, fix=\"start\")\nrsINPUT <- resize(rsINPUT, 1, fix=\"start\")\n\nChIPQC(rsChIP, rsINPUT, basename(file_path_sans_ext(chip)), basename(file_path_sans_ext(input)), outputpath)\n\n" }, { "alpha_fraction": 0.6215331554412842, "alphanum_fraction": 0.6377118825912476, "avg_line_length": 44.1478271484375, "blob_id": "f4f03d3521bb750a30bab66764230079d3cb1ea9", "content_id": "b3c4b71c86fd95bb84642b78d7c81c63df1d8e19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 5192, "license_type": "no_license", "max_line_length": 478, "num_lines": 115, "path": "/ChIP-QC/aggregate.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\nUSAGEMSG=\"usage: $(basename $0) INPUTFOLDER OUTPUTFOLDER\n\nAggregates results from NCIS and CHANCE QC calculations\n\nAuthor: Fabian Buske\n\n* INPUTFOLDER - contains the results of NCIS (*.txt) and CHANCE (*.IPstrength)\n* OUTPUTFOLDER - specifies where the results are written to\n\"\n\nDIR=$(dirname $0)\nVERSION=\"0.0.1\"\n\n[ $# -lt 2 ] && echo \"$USAGEMSG\" >&2 && exit 1\n\nCHIP=\"\"\nCONTROL=\"\"\n\nwhile getopts \"\" opt;\ndo\n    case ${opt} in\n    \\?) echo >&2 \"$0: error - unrecognized option $1\"\n        exit 1;;\n    esac\ndone\n\nshift $(($OPTIND-1))\nSOURCE=$1\nOUTPUT=$2\n\n[ ! -d ${SOURCE} ] && echo \"INPUT directory does not exist: ${SOURCE}\" && exit 1\nmkdir -p ${OUTPUT}\n\necho \"ChIP\tControl\tCHANCE_BackgroundPercent\tNCIS_BackgroundPercent\tNCIS_Binsize\tNCIS_SeqDepth\tNCIS_Normalization\tCHANCE_Enriched\tCHANCE_Cumulative\tCHANCE_Scaling\tCHANCE_FDR\tCHANCE_FDR_TFBS_NORMAL\tCHANCE_FDR_HISTONE_NORMAL\tCHANCE_FDR_TFBS_CANCER\tCHANCE_FDR_HISTONE_CANCER\" > ${OUTPUT}/ChIP-QC_aggregate.txt\n\n\nfor line in $(ls -la ${SOURCE} | tail -n+4 | awk '{print $NF}' | awk -F\\. 
'{print $1}' | sort -u ); do\n\tb=$(basename $line)\n    chip=$(echo ${b} | awk -F\\- '{print $1}')\n    input=$(echo ${b} | awk -F\\- '{print $2}')\n\techo \"$chip $input\"\n\n\tNCIS_BACKGROUNDRATIO=\"\"\n\tNCIS_BACKGROUNDPERCENT=\"\"\n\tNCIS_BINSIZE=\"\"\n\tNCIS_NORMALIZATION=\"\"\n\tNCIS_SEQDEPTH=\"\"\n\n\tCHANCE_FDR=\"\"\n\tCHANCE_FDR_TFBS_NORMAL=\"\"\n\tCHANCE_FDR_HISTONE_NORMAL=\"\"\n\tCHANCE_FDR_TFBS_CANCER=\"\"\n\tCHANCE_FDR_HISTONE_CANCER=\"\"\n\tCHANCE_SCALING=\"\"\n\tCHANCE_ENRICHED=\"\"\n\tCHANCE_CUMMULATIVE=\"\"\n\tCHANCE_BACKGROUNDPERCENT=\"\"\n\n\tif [ -f ${SOURCE}/${line}.txt ]; then\n\t\tf=${SOURCE}/${line}.txt\n\t\tNCIS_BACKGROUNDRATIO=$(tail -n 1 $f | awk '{print $5}')\n\t\tNCIS_BACKGROUNDPERCENT=$(bc <<< ${NCIS_BACKGROUNDRATIO}*100)\n\t\tNCIS_BINSIZE=$(tail -n 1 $f | awk '{print $3}')\n\t\tNCIS_NORMALIZATION=$(tail -n 1 $f | awk '{print $2}')\n\t\tNCIS_SEQDEPTH=$(tail -n 1 $f | awk '{print $4}')\n\tfi\n\n\n\tif [ -f ${SOURCE}/${line}.IPstrength ]; then\n\t\tf=${SOURCE}/${line}.IPstrength\n\t    CHANCE_FDR=$(cat $f | grep \"^fdr,\" | awk -F\\, '{print $2}')\n\t\tCHANCE_FDR_TFBS_NORMAL=$(cat $f | grep \"^tfbs_normal_fdr\" | awk -F\\, '{print $2}')\n        CHANCE_FDR_HISTONE_NORMAL=$(cat $f | grep \"^histone_normal_fdr\" | awk -F\\, '{print $2}')\n        CHANCE_FDR_TFBS_CANCER=$(cat $f | grep \"^tfbs_cancer_fdr\" | awk -F\\, '{print $2}')\n        CHANCE_FDR_HISTONE_CANCER=$(cat $f | grep \"^histone_cancer_fdr\" | awk -F\\, '{print $2}')\n    \tCHANCE_SCALING=$(cat $f | grep \"input_scaling_factor\" | awk -F\\, '{print $2}')\n\t    CHANCE_ENRICHED=$(cat $f | grep \"percent_genome_enriched\" | awk -F\\, '{print $2}')\n    \tCHANCE_CUMMULATIVE=$(cat $f | grep \"differential_percentage_enrichment\" | awk -F\\, '{print $2}')\n\t\tCHANCE_BACKGROUNDPERCENT=$(bc <<< 100-${CHANCE_CUMMULATIVE})\n\tfi\n\n\techo \"${chip}\t${input}\t${CHANCE_BACKGROUNDPERCENT}\t${NCIS_BACKGROUNDPERCENT}\t${NCIS_BINSIZE}\t${NCIS_SEQDEPTH}\t${NCIS_NORMALIZATION}\t${CHANCE_ENRICHED}\t${CHANCE_CUMMULATIVE}\t${CHANCE_SCALING}\t${CHANCE_FDR}\t${CHANCE_FDR_TFBS_NORMAL}\t${CHANCE_FDR_HISTONE_NORMAL}\t${CHANCE_FDR_TFBS_CANCER}\t${CHANCE_FDR_HISTONE_CANCER}\" >> ${OUTPUT}/ChIP-QC_aggregate.txt\ndone\n\nexit 1\n\nRSCRIPT=${OUTPUT}/ChIP-QC_aggregate.R\n\necho \"library(gridExtra)\" > ${RSCRIPT}\necho \"library(ggplot2)\" >> ${RSCRIPT}\necho \"library(scales)\" >> ${RSCRIPT}\necho \"data <- read.delim('${OUTPUT}/ChIP-QC_aggregate.txt', head=TRUE)\" >> ${RSCRIPT}\necho \"data <- cbind(data, 'CHANCE_BackgroundRatio'=0)\" >> ${RSCRIPT}\necho \"data['CHANCE_BackgroundRatio'] <- (100 - data['CHANCE_Cumulative'])/100\" >> ${RSCRIPT}\necho \"ChIP_cell <- sapply(strsplit(as.character(data[,1]), '_'), '[[', 2)\" >> ${RSCRIPT}\necho \"Control_cell <- sapply(strsplit(as.character(data[,2]), '_'), '[[', 2)\" >> ${RSCRIPT}\necho \"ChIP_mark <- sapply(strsplit(as.character(data[,1]), '_'), '[[', 3)\" >> ${RSCRIPT}\necho \"Control_mark <- sapply(strsplit(as.character(data[,2]), '_'), '[[', 3)\" >> ${RSCRIPT}\necho \"data <- cbind(data, ChIP_cell, Control_cell, ChIP_mark, Control_mark)\" >> ${RSCRIPT}\necho \"data <- data[with(data, order(ChIP_cell, ChIP_mark, Control_cell, Control_mark)),]\" >> ${RSCRIPT}\necho \"data[,1] <- factor(data[,1], levels=unique(data[,1]))\" >> ${RSCRIPT}\necho \"data[,2] <- factor(data[,2], levels=unique(data[,2]))\" >> ${RSCRIPT}\necho \"data['CHANCE_FDR'] <- sapply(data['CHANCE_FDR'], log10)\" >> ${RSCRIPT}\necho \"data['CHANCE_FDR'] <- -data['CHANCE_FDR'] \" >> ${RSCRIPT}\necho \"fmt <- '%.2f'\" >> ${RSCRIPT}\necho \"p1 <- 
ggplot(data, aes(Control,ChIP)) + geom_tile(aes(fill = BackgroundRatio), colour = 'white') + scale_fill_gradient2(limits=c(0.25, 1.),midpoint=0.63, high=muted('red'), mid='yellow', low=muted('steelblue'), na.value='steelblue') + theme(axis.text.x = element_text(angle = 45, hjust=1, size=8), axis.text.y=element_text(size=8)) + theme(legend.direction = 'horizontal', legend.position = 'top')+geom_text(aes(label=sprintf(fmt, BackgroundRatio)),size=2)\" >> ${RSCRIPT}\n\n\necho \"pdf(file='${OUTPUT}/ChIP-QC_aggregate.pdf')\" >> ${RSCRIPT}\necho \"p1\" >> ${RSCRIPT}\necho \"dev.off()\" >> ${RSCRIPT}\n\n/share/ClusterShare/software/contrib/Cancer-Epigenetics/tools/bin/Rscript ${OUTPUT}/ChIP-QC_aggregate.R\n" }, { "alpha_fraction": 0.6448049545288086, "alphanum_fraction": 0.6655160784721375, "avg_line_length": 77.29729461669922, "blob_id": "54896e5803074ff47b28fe78edb0e8e6cd94cb9e", "content_id": "3594f3776e6196322fc5d7a2bebe884586f43169", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2897, "license_type": "no_license", "max_line_length": 484, "num_lines": 37, "path": "/ChIP-QC/NCIS/aggregate.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\necho \"ChIP\tControl\tNormalization\tBinsize\tSeqDepth\tBackgroundRatio\" > NCIS_aggregate.txt\n\nfor f in `ls results/*.txt`; do\n\n\tb=$(basename $f)\n    t=(${b//./ })\n\tchip=(${t%-*})\n\tinput=(${t##*-})\n\n\ttail -n 1 $f | awk -v i1=${chip} -v i2=${input} '{print i1\"\\t\"i2\"\\t\"$2\"\\t\"$3\"\\t\"$4\"\\t\"$5}' >> NCIS_aggregate.txt\ndone\n\necho \"library(gridExtra)\" > NCIS_aggregate.R\necho \"library(ggplot2)\" >> NCIS_aggregate.R\necho \"library(scales)\" >> NCIS_aggregate.R\necho \"data <- read.delim('NCIS_aggregate.txt', head=TRUE)\" >> NCIS_aggregate.R\necho \"ChIP_cell <- sapply(strsplit(as.character(data[,1]), '_'), '[[', 2)\" >> NCIS_aggregate.R\necho \"Control_cell <- sapply(strsplit(as.character(data[,2]), '_'), '[[', 2)\" >> NCIS_aggregate.R\necho \"ChIP_mark <- sapply(strsplit(as.character(data[,1]), '_'), '[[', 3)\" >> NCIS_aggregate.R\necho \"Control_mark <- sapply(strsplit(as.character(data[,2]), '_'), '[[', 3)\" >> NCIS_aggregate.R\necho \"data <- cbind(data, ChIP_cell, Control_cell, ChIP_mark, Control_mark)\" >> NCIS_aggregate.R\necho \"data <- data[with(data, order(ChIP_cell, ChIP_mark, Control_cell, Control_mark)),]\" >> NCIS_aggregate.R\necho \"data[,1] <- factor(data[,1], levels=unique(data[,1]))\" >> NCIS_aggregate.R\necho \"data[,2] <- factor(data[,2], levels=unique(data[,2]))\" >> NCIS_aggregate.R\necho \"fmt <- '%.2f'\" >> NCIS_aggregate.R\necho \"p1 <- ggplot(data, aes(Control,ChIP)) + geom_tile(aes(fill = BackgroundRatio), colour = 'white') + scale_fill_gradient2(limits=c(0.25, 1.),midpoint=0.63, high=muted('red'), mid='yellow', low=muted('steelblue'), na.value='steelblue') + theme(axis.text.x = element_text(angle = 45, hjust=1, size=8), axis.text.y=element_text(size=8)) + theme(legend.direction = 'horizontal', legend.position = 'top')+geom_text(aes(label=sprintf(fmt, BackgroundRatio)),size=2)\" >> NCIS_aggregate.R\necho \"p2 <- ggplot(data, aes(Control,ChIP)) + geom_tile(aes(fill = Normalization), colour = 'white') + scale_fill_gradient2(low = 'steelblue', mid='yellow', high = 'red') + theme(axis.text.x = element_text(angle = 45, hjust = 1, size=8), axis.text.y=element_text(size=8)) + theme(legend.direction = 'horizontal', legend.position = 'top')+geom_text(aes(label=sprintf(fmt, Normalization)),size=2)\" >> 
NCIS_aggregate.R\necho \"p3 <- ggplot(data, aes(x=Control,y=ChIP,z=Binsize), log10='z') + geom_tile(aes(fill = Binsize), colour = 'white') + scale_fill_gradient2(low = 'steelblue', mid='yellow', high = 'red') + theme(axis.text.x = element_text(angle = 45, hjust = 1, size=8), axis.text.y=element_text(size=8)) + theme(legend.direction = 'horizontal', legend.position = 'top')\" >> NCIS_aggregate.R\necho \"pdf(file='NCIS_aggregate.pdf')\" >> NCIS_aggregate.R\necho \"p1\" >> NCIS_aggregate.R\necho \"p2\" >> NCIS_aggregate.R\necho \"p3\" >> NCIS_aggregate.R\necho \"dev.off()\" >> NCIS_aggregate.R\n\n/share/ClusterShare/software/contrib/Cancer-Epigenetics/tools/bin/Rscript NCIS_aggregate.R\n" }, { "alpha_fraction": 0.6957403421401978, "alphanum_fraction": 0.7079107761383057, "avg_line_length": 28.878787994384766, "blob_id": "74ce7f35a1ac1ddce01ccfc4d111edb9814e471f", "content_id": "92618334372d100b6d5af76a9ec60b01820c48f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 986, "license_type": "no_license", "max_line_length": 149, "num_lines": 33, "path": "/MappabilityWithGem/makeMappability.sh", "repo_name": "jiangchb/pipelines", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#\n# SGE \n#$ -cwd\n#$ -N GEM_indexer\n#$ -l h_vmem=8G\n#$ -b y\n#$ -j y\n#$ -V\n#$ -pe smp 8\n\n. ~/.profile\nmodule load fabbus/gem gi/ucsc_utils\nREFERENCE=\"/share/ClusterShare/biodata/contrib/genomeIndices_garvan/iGenomes/Mus_musculus/Mus_musculus/UCSC/mm10/Sequence/WholeGenomeFasta/genome.fa\"\n#gem-indexer -i $REFERENCE -o genome\n\nTAGSIZE=75\nif [ ! -s genome_$TAGSIZE.mappability ]; then\n\tgem-mappability -I genome.gem -o genome_$TAGSIZE -l $TAGSIZE -T 8\nfi\nif [ ! -s genome_$TAGSIZE.wig ]; then\n\tgem-2-wig -I genome.gem -i genome_$TAGSIZE.mappability -o genome_$TAGSIZE\n\twigToBigWig genome_$TAGSIZE.wig genome_$TAGSIZE.sizes genome_$TAGSIZE.bw\nfi\n\nTAGSIZE=50\nif [ ! -s genome_$TAGSIZE.mappability ]; then\n gem-mappability -I genome.gem -o genome_$TAGSIZE -l $TAGSIZE -T 8\nfi\nif [ ! -s genome_$TAGSIZE.wig ]; then\n gem-2-wig -I genome.gem -i genome_$TAGSIZE.mappability -o genome_$TAGSIZE\n\twigToBigWig genome_$TAGSIZE.wig genome_$TAGSIZE.sizes genome_$TAGSIZE.bw\nfi\n" } ]
31
cico1989/workshop_exercise
https://github.com/cico1989/workshop_exercise
4639d1e9dd1388789db61bbcd7120031b3f158ba
db233a6419b5e61a104742e210e147ee8a28c8ef
f8d944598294e9b6f6e74d97828341537b754879
refs/heads/master
2020-04-15T08:47:37.256842
2013-11-11T20:49:15
2013-11-11T20:49:15
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6914728879928589, "alphanum_fraction": 0.7100775241851807, "avg_line_length": 57.54545593261719, "blob_id": "66812641c57e26842d279704e7bd1fd766932900", "content_id": "b7db2bbf05aec951bbc7c8fe83856888c3d415a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 645, "license_type": "no_license", "max_line_length": 216, "num_lines": 11, "path": "/temp/exercise2_partone", "repo_name": "cico1989/workshop_exercise", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#Program:Get mean monthly precipitation value (column4) of each month (column2) for each file.\n\nfor file in `find fluxes*`;\ndo\n#Get the numbers in file name\nposition=`echo $file | cut -d \"_\" -f 2`_`echo $file | cut -d \"_\" -f 3`\ntitle=(January February March April May June July August September October November December)\n#Get the sum of precipitation for one month and then divide the sum by number of years\nawk '{total[$2] += $4; count[$1]++} END {num=asort(count); for(month in total) printf \"mean %4s rainfall: %15.4f mm/month\\n\",month,total[month]/num}' $file | sort -k 1 -n > ../results/monthly_precipitation.$position\ndone\n\n" }, { "alpha_fraction": 0.669646680355072, "alphanum_fraction": 0.6990208625793457, "avg_line_length": 75.70967864990234, "blob_id": "124fc24814b59195ae7e359b590d3e73dfae5152", "content_id": "41bb1db27fdf470462cf60c3669a6f5201394e03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2349, "license_type": "no_license", "max_line_length": 414, "num_lines": 31, "path": "/temp/exercise2_400", "repo_name": "cico1989/workshop_exercise", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#Program:Get mean monthly precipitation value (column4) of each month (column2) for each file.\n\necho -e \"mean January rainfall:\\nmean February rainfall:\\nmean March rainfall:\\nmean April rainfall:\\nmean May rainfall:\\nmean June rainfall:\\nmean July rainfall:\\nmean August rainfall:\\nmean September rainfall:\\nmean October rainfall:\\nmean November rainfall:\\nmean December rainfall:\" > ../results/temp1\nfor file in `find fluxes*`;\ndo\n#Get the numbers in file name\nposition=`echo $file | cut -d \"_\" -f 2`_`echo $file | cut -d \"_\" -f 3`\n#Get the sum of precipitation for one month and then divide the sum by number of years\nawk '{total[$2] += $4; count[$1]++} END {num=asort(count); for(month in total) printf \"%f mm/month %s\\n\",total[month]/num,month}' $file | sort -k 3 -n | awk '{printf \"%f %s\",$1,$2}' > ../results/temp2.$position\npaste ../results/temp1 ../results/temp2.$position > ../results/monthly_precipitation_$position #| echo ../results/monthly_precipitation_$position\necho $file | cut -d \"_\" -f 2 > ../results/latitude.$position\nawk '{$2=30901073.51*(1-$1^2/2+$1^4/24-$1^6/720+$1^8/40320)} END {print $2}' ../results/latitude.$position > ../results/area_$position #| echo ../results/area_$position\n#echo $file\ndone\n\nfor file in `find fluxes*`;\ndo\nposition=`echo $file | cut -d \"_\" -f 2`_`echo $file | cut -d \"_\" -f 3`\nhead -n 1 ../results/monthly_precipitation_$position | awk '{print $4}'# > ../results/temp3_$position.1\nhead -n 2 ../results/monthly_precipitation_$position | tail -n 1 | awk '{print $4}'# > ../results/temp3_$position.2 \n#paste ../results/temp3_$position.$i ../results/area_$position >> ../results/pa_$i \ndone\n\n#echo -e \"areal average January rainfall:\\nareal average February rainfall:\\nareal average March rainfall:\\nareal average April 
rainfall:\\nareal average May rainfall:\\nareal average June rainfall:\\nareal average July rainfall:\\nareal average August rainfall:\\nareal average September rainfall:\\nareal average October rainfall:\\nareal average November rainfall:\\nareal average December rainfall:\" > ../results/temp4\n#for file in `find ../results/pa*`\n#do\n#awk '{suma += $1*$2; sumb += $2} END {printf \"%f mm/month\\n\",suma/sumb}' $file #>> ../results/temp5\n#echo $file | cut -d \"_\" -f 2 >> ../results/temp6\n#paste ../results/temp6 ../results/temp5 | sort -k 1 -n > ../results/areal_average\n#done\n\n\n" }, { "alpha_fraction": 0.5976331233978271, "alphanum_fraction": 0.6792899370193481, "avg_line_length": 75.81818389892578, "blob_id": "22abc50c08cede3a09e5abadc32ab54121d70aad", "content_id": "2264edc5c83b97b75509bf9d30997292085b9141", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 845, "license_type": "no_license", "max_line_length": 266, "num_lines": 11, "path": "/temp/exercise2 (copy)", "repo_name": "cico1989/workshop_exercise", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#Program:Get mean monthly precipitation value (column4) of each month (column2) for each file.\n\n\nawk '{for(i=1;i<NF+1;i++) sum[i] += $i} END {for(i=1;i<NF+1;i++) printf \"column%4s:%20.4f\\n\",i,sum[i]/NR}' fluxes_-15.2500_39.2500\n\n#Get the sum of precipitation for one month and then divide the sum by number of years\nawk '{total[$2] += $4; count[$1]++} END {num=asort(count);for(month in total) printf \"%4s:%15.4f\\n\",month,total[month]/num}' fluxes_-15.2500_39.2500 | sort -k 1 -n\n\n#Get the monthly precipitation for each year and then get the mean monthly rainfall\nawk '{total[$1,$2] += $4;count1[$1]++} END {num1=asort(count1);for(year in total) {split(year,total1,SUBSEP); sum[month]+=total1[year]};for(month in mean) {mean[month]=sum[month]/num1; printf \"%4s:%15.4f\\n\",month,mean[month]}}' fluxes_-15.2500_39.2500 | sort -k 1 -n\n" }, { "alpha_fraction": 0.6100599765777588, "alphanum_fraction": 0.6645131707191467, "avg_line_length": 53.150001525878906, "blob_id": "0531f694632d28fd5243539f8a53698752dc2cf3", "content_id": "56a4b0ad87c9c8a9562dfa311e780576a9a4773", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2167, "license_type": "no_license", "max_line_length": 201, "num_lines": 40, "path": "/temp/exercise2_change", "repo_name": "cico1989/workshop_exercise", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#Program:Get mean monthly precipitation value (column4) of each month (column2) for each file.\n#The input files are placed in the same folder named 'fluxes'. All files are generated in the folder named 'results'.\npath=`pwd`\ndirname=`echo $path/results`\nif [ -d \"$dirname\" ]; then\n    echo 'results folder already exists!'\n    rm -rf results\nelse \n    mkdir results\nfi\ncd fluxes\nfor file in `find fluxes*`;\ndo\n#Get the numbers in file name\nposition=`echo $file | cut -d \"_\" -f 2`_`echo $file | cut -d \"_\" -f 3`\n#Get the sum of precipitation for one month and then divide the sum by number of years\nawk '{total[$2] += $4; count[$1]++} END {num=asort(count); for(month in total) printf \"%4s %f mm/month\\n\",month,total[month]/num}' $file | sort -k 1 -n > ../results/monthly_precipitation_$position\nawk '{printf \"%s %f\\n\",$1,$2}' ../results/monthly_precipitation_$position > ../results/temp1_$position\necho $file | cut -d \"_\" -f 2 > ../results/latitude.$position\nawk 
'{$2=30901073.51*(1-($1/180*3.1415926)^2/2+($1/180*3.1415926)^4/24-($1/180*3.1415926)^6/720+($1/180*3.1415926)^8/40320)} END {print $2}' ../results/latitude.$position > ../results/temp2_$position\nawk '{for(i=1;i<=12;i++) print $1}' ../results/temp2_$position > ../results/area_$position \npaste ../results/temp1_$position ../results/area_$position > ../results/info_$position\ndone\n\nfor file in `find ../results/info*`;\ndo\nawk '{print $0}' $file >> ../results/table\ndone\n\nawk '{suma[$1]+=$2*$3} END {for(month in suma) printf \"%s %f \\n\",month,suma[month]}' ../results/table | sort -k 1 -n > ../results/temp3\nawk '{sumb[$1]+=$3} END {for(month in sumb) printf \"%s %f \\n\",month,sumb[month]}' ../results/table | sort -k 1 -n | awk '{print $2}' > ../results/temp4\npaste ../results/temp3 ../results/temp4 > ../results/temp5\nawk '{average[$1]=$2/$3} END {for(month in average) printf \"areal average rainfall of the month %s : %10.4f mm/month\\n\",month,average[month]}' ../results/temp5 | sort -k 7 -n > ../results/areal_average\n\nfind ../results/temp* | xargs rm \nfind ../results/latitude* | xargs rm\nfind ../results/info* | xargs rm\nfind ../results/area_* | xargs rm \nfind ../results/table* | xargs rm\n\n" }, { "alpha_fraction": 0.5619558095932007, "alphanum_fraction": 0.5927662253379822, "avg_line_length": 37.7662353515625, "blob_id": "4af4ba55f15835cea254aa46cd7c48e4425b569a", "content_id": "3e2da9ad357c45569b0ba6fea3972b7a32f17b28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2986, "license_type": "no_license", "max_line_length": 95, "num_lines": 77, "path": "/exercise_3/exercise3", "repo_name": "cico1989/workshop_exercise", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\nimport os\nimport numpy as np\nimport sys\n\n##get the latitude and longitude from the file name\n##extract the second column (month) and fourth column (rainfall) from the fluxes files\ncurrent_path = os.getcwd()\naddfolder = current_path,'fluxes'\nfolder_path = '/'.join(addfolder)\nresults_path = os.path.join(current_path,'results')\nif not os.path.isdir(results_path):\n    os.makedirs(results_path)\nelse:\n    answer = raw_input('results folder already exists! Overwrite (y/n): ')\n    if answer.strip().lower().startswith('y'):\n        for file in os.listdir(results_path):\n            targetfile = os.path.join(results_path, file)\n            if os.path.isfile(targetfile):\n                os.remove(targetfile)\n    else:\n        print 'Overwrite failed! 
Results may have problems' \nfor file in os.listdir(folder_path):\n    if '~' not in file: #exclude the automatic backup files which end with '~'\n#        print file\n        lat = file.split(\"_\")[1]\n        lon = file.split(\"_\")[2]\n        addfile = folder_path, file\n        filepath = '/'.join(addfile)\n        data = np.loadtxt(filepath,usecols = (0,1,3))\n        monthly_sum = np.zeros([12,1])\n        monthly_mean = np.zeros([12,1])\n##data.shape[0] is the number of rows, data.shape[1] is the number of columns\n        year_count = 1\n        for i in range(1,data.shape[0]):\n            if data[i,0] != data[i-1,0]:\n                year_count = year_count + 1\n#        print year_count\n        for i in range(0,data.shape[0]):\n            monthly_sum[data[i,1]-1] += data[i,2]\n#        print monthly_sum\n        for i in range(0,12):\n            monthly_mean[i] = monthly_sum[i]/year_count\n#        print monthly_mean\n        tempname = 'monthly','precipitation',lat,lon\n        filename = '_'.join(tempname)\n        results_path_filename = os.path.join(results_path,filename)\n        f = open(results_path_filename,'w')\n        for i in range(0,12):\n            f.write(\"%2d %.6f mm/month\\n\" % (i+1,monthly_mean[i]))\n        f.close()\n        area_factor = 3090078230*np.cos(float(lat)*np.pi/180)\n#        print area_factor\n        temp1_path = os.path.join(results_path,'temp1') \n        for i in range(0,12):\n            f = open(temp1_path,'a')\n            f.write(\"%2d %.6f %.4f\\n\" % (i+1,monthly_mean[i],area_factor))\n            f.close()\ndata1 = np.loadtxt(temp1_path)\nprep_times_area = np.zeros([12,1])\narea_sum = np.zeros([12,1])\nareal_mean = np.zeros([12,1])\n#print prep_times_area\nfor i in range(0,data1.shape[0]):\n    prep_times_area[data1[i,0]-1] += data1[i,1]*data1[i,2]\n    area_sum[data1[i,0]-1] += data1[i,2]\nfor i in range(0,12):\n    areal_mean[i] = prep_times_area[i]/area_sum[i]\nareal_average_path = os.path.join(results_path,'areal_average')\nf = open(areal_average_path,'w')\nfor i in range(0,12):\n    f.write(\"areal average rainfall of the month %2d: %.6f mm/month\\n\" % (i+1,areal_mean[i]))\nf.close()\n\n" } ]
6
ptdriscoll/nand2tetris
https://github.com/ptdriscoll/nand2tetris
c5ea68fbd299bd688b3445157edffc838a28c4c9
6f2aa45a8d3781ce58389b0e237756c1a84b2c2d
f82283902fa2dacc92fa5d7fc003976cb6600924
refs/heads/master
2020-04-27T22:07:20.120201
2019-03-09T17:38:33
2019-03-09T17:38:33
174,724,002
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.42876675724983215, "alphanum_fraction": 0.43532949686050415, "avg_line_length": 18.869565963745117, "blob_id": "3b639cf690ccbd205f09398b17ca13f0ab120b82", "content_id": "3afb3e707c6b0549949e170da9b05a4774dc137b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3657, "license_type": "no_license", "max_line_length": 96, "num_lines": 184, "path": "/project7/assembly_code.py", "repo_name": "ptdriscoll/nand2tetris", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \n\n\"\"\"\nDictionaries and functions that map virtual-machine commands to assembly-code snippets. \n\"\"\"\n\n\nsymbol_table = {\n 'local': 'LCL', \n 'constant': 'constant',\n 'argument': 'ARG', \n 'this': 'THIS', \n 'that': 'THAT', \n 'temp': 'R5',\n 'pointer': 'R3'\n}\n\nmath_table = {\n 'add': 'D+M',\n 'sub': 'M-D',\n 'neg': '-M',\n 'eq': 'EQ',\n 'gt': 'GT',\n 'lt': 'LT',\n 'and': '&',\n 'or': '|',\n 'not': '!' \n}\n\ndef pop_cmd(segment, index, static=False):\n \"\"\"\n Accepts memory segment and memory segment index,\n and returns assembly code for pop command.\n \"\"\"\n \n code = ''''''\n \n if static:\n code += '''@{segment}.{index}\n D=A \n '''.format(segment=segment, index=index)\n \n else: \n code += '''@{index}\n D=A\n @{segment}\n '''.format(index=index, segment=segment)\n \n if segment == 'R3' or segment == 'R5':\n code += '''D=D+A\n ''' \n\n else:\n code += '''D=D+M\n ''' \n \n code += '''@R13\n M=D\n @SP\n M=M-1\n A=M\n D=M\n @R13\n A=M\n M=D\n '''\n \n return code.replace(' ','')\n \ndef push_cmd(segment, index, static=False):\n \"\"\"\n Accepts memory segment and memory segment index,\n and returns assembly code for push command. \n \"\"\"\n \n code = ''''''\n \n if static:\n code += '''@{segment}.{index}\n D=M\n '''.format(segment=segment, index=index) \n \n else: \n code += '''@{index}\n D=A\n '''.format(index=index)\n \n if segment != 'constant': \n code += '''@{segment}\n '''.format(segment=segment) \n \n if segment == 'R3' or segment == 'R5':\n code += '''A=D+A\n D=M\n ''' \n \n else:\n code += '''A=D+M\n D=M\n ''' \n \n code += '''@SP\n A=M\n M=D\n @SP\n M=M+1\n '''\n\n return code.replace(' ','')\n\ndef math_cmd(command):\n \"\"\"\n Accepts math command string and returns assembly code for math operation, \n and returns assembly code:\n - command = D+M, M-D or -M\n - command string = add, sub or neg\n \"\"\"\n \n if command == '-M':\n code = '''@SP\n A=M-1\n M=-M\n ''' \n else:\n code = '''@SP\n M=M-1\n A=M\n D=M\n A=A-1\n M={command}\n '''.format(command=command)\n \n return code.replace(' ','') \n \ndef compare_cmd(command, jump):\n \"\"\"\n Accepts two string arguments and returns assembly code for comparison operation, \n and returns assembly code:\n - command = EQ, GT or LT\n - command string = eq, gt or lt\n - jump label includes incremented number each time a jump is used by CodeWriter instance \n \"\"\"\n \n code = '''@SP\n M=M-1\n A=M\n D=M\n A=A-1\n D=M-D\n M=-1\n @{jump}\n D;J{command}\n @SP\n A=M-1\n M=0\n ({jump})\n '''.format(command=command, \n jump=jump)\n \n return code.replace(' ','')\n \ndef logic_cmd(command):\n \"\"\"\n Accepts logic command string and returns assembly code for logical operation, \n and returns assembly code:\n - command = &, |, !\n - command string = and, or, not \n \"\"\"\n \n if command == '!':\n code = '''@SP\n A=M-1\n M=!M\n ''' \n else:\n code = '''@SP\n M=M-1\n A=M\n D=M\n A=A-1\n M=D{command}M\n '''.format(command=command)\n \n return code.replace(' ','') " }, { 
"alpha_fraction": 0.521040678024292, "alphanum_fraction": 0.523310661315918, "avg_line_length": 34.296295166015625, "blob_id": "0fe7322431273395741f4a77c178774348acb895", "content_id": "74f54057a8e3d8df766c3cbfa65a812c6089739a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5727, "license_type": "no_license", "max_line_length": 123, "num_lines": 162, "path": "/project11/symbol_table.py", "repo_name": "ptdriscoll/nand2tetris", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \n\n\"\"\"\nA symbol table that associates names with information needed for Jack compilation: type, kind, and\nrunning index. The symbol table has 2 nested scopes (class/subroutine).\n\"\"\"\n\n\nclass SymbolTable:\n \"\"\"\n This module provides services for creating, populating, and using a symbol table. Each symbol has a scope \n from which it is visible in the source code. In the symbol table, each symbol is given a running number (index) \n within the scope, where the index starts at 0 and is reset when starting a new scope. \n \n These identifiers may appear in the symbol table:\n - static: Scope: class\n - field: Scope: class\n - argument: Scope: subroutine (method/function/constructor)\n - var: Scope: subroutine (method/function/constructor)\n \n When compiling code, any identifier not found in the symbol table may be assumed to be a subroutine name \n or a class name. Since the Jack language syntax rules suffice for distinguishing between these two possibilities, \n and since no \"linking\" needs to be done by the compiler, these identifiers do not have to be kept in the symbol table. \n \"\"\"\n \n def __init__(self):\n \"\"\"\n Creates new empty symbol table.\n Example for method's position 1 argument for an integer (position 0 is for method's this object): \n {'sum': {'type': 'int', 'kind': 'argument', 'index': 1}} \n \"\"\"\n \n self._class = {}\n self._subroutine = {}\n self._var_count = {'static': 0, \n 'field': 0,\n 'argument': 0, \n 'var': 0}\n\n def __str__(self):\n to_print = 'Class Table\\n-----------\\n'\n if self._class:\n for key, value in self._class.items():\n to_print += key + '\\t' + value['type'] + '\\t' + value['kind'] + '\\t' + str(value['index']) + '\\n'\n else:\n to_print += 'No variables\\n' \n \n to_print += '\\nSubroutine Table\\n----------------\\n' \n if self._subroutine: \n to_print += 'NAME\\tKIND\\tTYPE\\tINDEX\\n===\\t====\\t====\\t=====\\n' \n for key, value in self._subroutine.items():\n to_print += key + '\\t' + value['type'] + '\\t' + value['kind'] + '\\t' + str(value['index']) + '\\n'\n else:\n to_print += 'No variables\\n' \n \n return to_print\n \n def start_subroutine(self):\n \"\"\"\n Starts new subroutine scope (i.e. erases all names in the previous subroutine's scope). \n \"\"\"\n \n self._subroutine = {}\n self._var_count['argument'] = 0\n self._var_count['var'] = 0 \n \n def define(self, name, type, kind):\n \"\"\"\n Accepts name, type (static, field, argument, var) and kind. \n Defines new identifier of a given name, type and kind, and assigns it a running index. \n static and field identifiers have a class scope, while argument and var identifiers have a subroutine scope. 
\n \"\"\" \n \n if kind in ['static', 'field']: \n self._class[name] = {'type': type, 'kind': kind, 'index': self._var_count[kind]} \n \n else:\n self._subroutine[name] = {'type': type, 'kind': kind, 'index': self._var_count[kind]}\n \n self._var_count[kind] += 1 \n \n def var_count(self, kind):\n \"\"\"\n Accepts kind (static, field, argument, var). \n Returns number of variables, as integer, of the given kind already defined in the current scope. \n \"\"\"\n \n return self._var_count[kind]\n \n def exists(self, name):\n \"\"\"\n Checks whether keyword is in symbol table and returns bool. \n \"\"\"\n \n if name in self._subroutine or name in self._class:\n return True \n\n return False \n\n def type_of(self, name):\n \"\"\"\n Returns the type of named identifier in current scope. \n \"\"\"\n \n if name in self._subroutine:\n return self._subroutine[name]['type']\n\n elif name in self._class:\n return self._class[name]['type'] \n\n return None \n \n def kind_of(self, name, vm=False):\n \"\"\"\n Returns the kind of named identifier in current scope (static, field, argument, var).\n If vm=True, then field is returned as this, and var is returned as local. \n Returns None if identifier is unknown in the current scope.\n \"\"\"\n \n kind = None\n \n vm_map = {'static': 'static',\n 'field': 'this',\n 'argument': 'argument',\n 'var': 'local',\n None: None}\n \n if name in self._subroutine:\n kind = self._subroutine[name]['kind']\n\n elif name in self._class:\n kind = self._class[name]['kind'] \n \n if vm:\n kind = vm_map[kind] \n\n return kind \n \n def index_of(self, name):\n \"\"\"\n Returns the index, as integer, assigned to named dentifier.\n \"\"\"\n \n if name in self._subroutine:\n return self._subroutine[name]['index']\n\n elif name in self._class:\n return self._class[name]['index'] \n\n return None\n \n def get_class_table(self):\n \"\"\"\n Returns symbol class table.\n \"\"\"\n return self._class \n \n def get_subroutine_table(self):\n \"\"\"\n Returns symbol subroutine table.\n \"\"\"\n return self._subroutine " }, { "alpha_fraction": 0.4791250228881836, "alphanum_fraction": 0.4820651412010193, "avg_line_length": 33.41093063354492, "blob_id": "f8d69b2e9f3eea5f86d18ee35180b44446f056a9", "content_id": "74c710bf695f1bec184656ae5f31eee7a794a21a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17006, "license_type": "no_license", "max_line_length": 124, "num_lines": 494, "path": "/project10/compilation_engine_xml.py", "repo_name": "ptdriscoll/nand2tetris", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \n\n\"\"\"\nThis class is the second half of a syntactic analyzer (the tokenizer if the first half), \nwhich parses Jack programs according to the Jack grammar. The output is an XML formatted document. \n\"\"\"\n\nimport os, sys, ntpath, re\n\nif os.getcwd().endswith('Compiler'):\n from compiler import tokenizer\n \nelse:\n import tokenizer\n \n\nclass CompilationEngine:\n \"\"\"\n This module gets input from a tokenizer and writes a parsed XML structure \n into an output file/stream. This is done by a series of compilexxx() methods, \n where xxx is a corresponding syntactic element of the Jack grammar. \n \n The contract between these methods is that each compilexxx() method should \n read the syntactic construct xxx from the input, advance() the tokenizer exactly beyond xxx, \n and output the XML parsing of xxx. 
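For example, compile_while() reads every token from the 'while' keyword through \n    the closing '}' and outputs a single <whileStatement> element. 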
Thus, compilexxx()may only be called if \n xxx is the next syntactic element of the input.\n \"\"\"\n \n def __init__(self, input_full_path, output_full_path, test=False):\n \"\"\"\n creates a new compilation engine with the given input and output. \n The next method called must be compileClass().\n \"\"\"\n \n self._file_path = input_full_path \n self._file_name = ntpath.basename(input_full_path)\n self._file_open = open(output_full_path, 'w') \n self._tokenize = tokenizer.Tokenizer(input_full_path)\n self._tokenize.get_next_token() \n self._indent = 0\n self._IDENTIFIER_REGEX = re.compile('^[A-Za-z0-9_-][A-Za-z0-9_-]*$')\n \n self._dispatch_statement = {\n 'let': self.compile_let,\n 'if': self.compile_if,\n 'while': self.compile_while,\n 'do': self.compile_do,\n 'return': self.compile_return \n } \n \n self._tag_count = 0 \n self._xml = ''\n self._test = test\n \n if not self._test:\n self.compile_class()\n \n def __str__(self): \n to_print = ' Input file: ' + self._file_name + '\\n'\n to_print += 'Current token: ' + self._tokenize.get_current_token(xml=True) + '\\n'\n to_print += ' Tag count: ' + str(self._tag_count) + '\\n' \n return to_print\n \n def compile_class(self):\n \"\"\"\n Compiles complete class.\n 'class' className '{' classVarDec* subroutineDec* '}' \n \"\"\"\n \n self.tag('class')\n self._indent += 1\n \n self.eat('class')\n self.eat('className')\n self.eat('{')\n \n while self._tokenize.get_current_token() in ['static', 'field']: \n self.compile_class_var_dec()\n \n while self._tokenize.get_current_token() in ['constructor', 'function', 'method']: \n self.compile_subroutine() \n \n self.eat('}') \n \n self._indent -= 1\n self.tag('/class')\n\n #close writer\n self.close() \n \n def compile_class_var_dec(self):\n \"\"\"\n Compiles static declaration or field declaration. \n ('static' | 'field' ) type varName (',' varName)* ';' \n \"\"\"\n \n self.tag('classVarDec')\n self._indent += 1 \n \n self.eat(['static', 'field'])\n self.eat(['int', 'char', 'boolean', 'className'])\n self.eat('varName')\n while(self._tokenize.get_current_token() == ','):\n self.eat(',')\n self.eat('varName') \n self.eat(';')\n \n self._indent -= 1 \n self.tag('/classVarDec') \n \n def compile_subroutine(self):\n \"\"\"\n Compiles complete method, function or constructor. \n ('constructor' | 'function' | 'method') ('void' | type) \n subroutineName '(' parameterList ')' subroutineBody \n \"\"\"\n \n self.tag('subroutineDec') \n self._indent += 1 \n \n self.eat(['constructor', 'function', 'method'])\n self.eat(['void', 'int', 'char', 'boolean', 'className'])\n self.eat('subroutineName')\n self.eat('(')\n self.compile_parameter_list()\n self.eat(')') \n\n self.tag('subroutineBody') \n self._indent += 1\n \n self.eat('{')\n while(self._tokenize.get_current_token() == 'var'):\n self.compile_var_dec()\n self.compile_statements() \n self.eat('}')\n \n self._indent -= 1 \n self.tag('/subroutineBody') \n \n self._indent -= 1 \n self.tag('/subroutineDec') \n \n def compile_parameter_list(self):\n \"\"\"\n Compiles (possibly empty) parameter list, not including enclosing \"()\".\n ( (type varName) (',' type varName)*)? 
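\n        Example: the list 'int x, boolean sign' compiles to five terminal tokens\n        (int, x, ',', boolean, sign) inside the <parameterList> element; an empty\n        list still emits the (empty) <parameterList> element.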
\n \"\"\"\n \n self.tag('parameterList')\n \n if self._tokenize.get_current_token() != ')': \n self._indent += 1\n\n self.eat(['int', 'char', 'boolean', 'className'])\n self.eat('varName') \n while(self._tokenize.get_current_token() == ','):\n self.eat(',')\n self.eat(['int', 'char', 'boolean', 'className'])\n self.eat('varName') \n \n self._indent -= 1\n \n self.tag('/parameterList')\n\n def compile_var_dec(self):\n \"\"\"\n Compiles var declaration.\n 'var' type varName (',' varName)* ';' \n \"\"\"\n \n self.tag('varDec') \n self._indent += 1\n \n self.eat('var')\n self.eat(['int', 'char', 'boolean', 'className']) \n self.eat('varName') \n while(self._tokenize.get_current_token() == ','):\n self.eat(',') \n self.eat('varName') \n self.eat(';') \n \n self._indent -= 1\n self.tag('/varDec') \n\n def compile_statements(self):\n \"\"\"\n Compiles sequence of statements, not including enclosing \"{}\". \n \"\"\"\n \n self.tag('statements') \n \n while(self._tokenize.get_current_token() in ['let', 'if', 'while', 'do', 'return']):\n self._dispatch_statement[self._tokenize.get_current_token()]() \n\n self.tag('/statements')\n\n def compile_subroutine_call(self):\n \"\"\"\n Compiles subroutine call statement. \n subroutineName '(' expressionList ')' | ( className | varName) \n '.' subroutineName '('expressionList ')' \n \"\"\"\n \n self.eat(['className', 'subroutineName', 'varName'])\n if self._tokenize.get_current_token() == '.':\n self.eat('.') \n self.eat('subroutineName') \n self.eat('(')\n self.compile_expression_list()\n self.eat(')') \n \n def compile_do(self):\n \"\"\"\n Compiles do statement. \n DO: 'do' subroutineCall ';' \n SUBROUTINECALL: subroutineName '(' expressionList ')' | ( className | varName) \n '.' subroutineName '('expressionList ')' \n \"\"\"\n \n self._indent += 1\n self.tag('doStatement') \n self._indent += 1\n \n self.eat('do')\n self.compile_subroutine_call()\n self.eat(';') \n \n self._indent -= 1\n self.tag('/doStatement')\n self._indent -= 1\n \n def compile_let(self):\n \"\"\"\n Compiles let statement. \n 'let' varName ('[' expression ']')? '=' expression ';' \n \"\"\"\n\n self._indent += 1\n self.tag('letStatement') \n self._indent += 1\n \n self.eat('let')\n self.eat('varName')\n while self._tokenize.get_current_token() == '[':\n self.eat('[')\n self.compile_expression()\n self.eat(']') \n self.eat('=') \n self.compile_expression()\n self.eat(';') \n \n self._indent -= 1\n self.tag('/letStatement')\n self._indent -= 1 \n\n def compile_while(self):\n \"\"\"\n Compiles while statement. \n 'while' '(' expression ')' '{' statements '}' \n \"\"\"\n \n self._indent += 1\n self.tag('whileStatement') \n self._indent += 1\n \n self.eat('while')\n self.eat('(')\n self.compile_expression()\n self.eat(')')\n \n self.eat('{')\n self.compile_statements()\n self.eat('}')\n\n self._indent -= 1\n self.tag('/whileStatement') \n self._indent -= 1 \n\n def compile_return(self):\n \"\"\"\n Compiles return statement.\n 'return' expression? ';' \n \"\"\"\n \n self._indent += 1\n self.tag('returnStatement') \n self._indent += 1\n \n self.eat('return')\n if self._tokenize.get_current_token() != ';':\n self.compile_expression()\n self.eat(';')\n\n self._indent -= 1\n self.tag('/returnStatement')\n self._indent -= 1 \n\n def compile_if(self):\n \"\"\"\n Compiles if statement, possibly with trailing else clause.\n 'if' '(' expression ')' '{' statements '}' ( 'else' '{' statements '}' )? 
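\n        Example: 'if (x) { let y = x; } else { let y = 0; }' compiles to one\n        <ifStatement> element; the else clause is compiled only when the 'else'\n        token is present.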
\n        \"\"\"\n        \n        self._indent += 1\n        self.tag('ifStatement') \n        self._indent += 1\n        \n        self.eat('if')\n        self.eat('(') \n        self.compile_expression()\n        self.eat(')')\n        \n        self.eat('{')\n        self.compile_statements()\n        self.eat('}')\n        if self._tokenize.get_current_token() == 'else':\n            self.eat('else')\n            self.eat('{')\n            self.compile_statements()\n            self.eat('}')\n\n        self._indent -= 1\n        self.tag('/ifStatement')\n        self._indent -= 1 \n\n    def compile_expression(self):\n        \"\"\"\n        Compiles expression. \n        term (op term)* \n        OP: '+' | '-' | '*' | '/' | '&' | '|' | '<' | '>' | '=' \n        TERM: integerConstant | stringConstant | keywordConstant | varName | \n              varName '[' expression']' | subroutineCall | \n              '(' expression ')' | unaryOp term \n        \"\"\"\n        \n        self.tag('expression')\n        self._indent += 1\n        \n        if (self._tokenize.get_token_type() in ['integerConstant', 'stringConstant', 'identifier']\n            or self._tokenize.get_current_token() in ['true', 'false', 'null', 'this', '(', '-', '~']): \n            \n            self.compile_term()\n            \n            while self._tokenize.get_current_token() in ['+', '-', '*', '/', '&', '|', '<', '>', '=']:\n                self.eat(['+', '-', '*', '/', '&', '|', '<', '>', '='])\n                self.compile_term()\n        \n        self._indent -= 1 \n        self.tag('/expression') \n        \n    def compile_term(self):\n        \"\"\"\n        Compiles term. This method is faced with a slight difficulty when trying to \n        decide between some of the alternative rules. Specifically, if the \n        current token is an identifier, it must still distinguish between a variable, \n        an array entry, and a subroutine call. \n        \n        The distinction can be made by looking ahead one extra token. A single \n        look-ahead token, which may be one of \"[\", \"(\", \".\", suffices to distinguish \n        between the three possibilities. Any other token is not part of this term \n        and should not be advanced over.\n\n        integerConstant | stringConstant | keywordConstant | varName | \n        varName '[' expression']' | subroutineCall | \n        '(' expression ')' | unaryOp term \n        \"\"\"\n        \n        self.tag('term') \n        self._indent += 1\n        \n        if self._tokenize.get_current_token() == '(':\n            self.eat('(')\n            self.compile_expression()\n            self.eat(')')\n        \n        elif self._tokenize.get_current_token() in ['-', '~']:\n            self.eat(['-', '~'])\n            self.compile_term()\n        \n        else: \n            cached_token, cached_index, cached_is_string, cached_state = self._tokenize.cache_current_token() \n            next_token = self._tokenize.get_next_token() \n            self._tokenize.reset_current_token(cached_token, cached_index, cached_is_string, cached_state) \n            \n            if next_token in ['(', '.']:\n                self.compile_subroutine_call()\n            \n            elif next_token == '[': \n                self.eat('varName') \n                self.eat('[') \n                self.compile_expression()\n                self.eat(']')\n            \n            else: \n                self.eat(['varName', 'integerConstant', 'stringConstant', 'true', 'false', 'null', 'this'])\n        \n        self._indent -= 1\n        self.tag('/term')\n\n    def compile_expression_list(self):\n        \"\"\"\n        Compiles (possibly empty) comma separated list of expressions.\n        (expression (',' expression)* )? \n        \"\"\"\n        \n        self.tag('expressionList')\n        self._indent += 1\n        \n        if (self._tokenize.get_token_type() in ['integerConstant', 'stringConstant', 'identifier']\n            or self._tokenize.get_current_token() in ['true', 'false', 'null', 'this', '(', '-', '~']):\n            \n            self.compile_expression() \n            while(self._tokenize.get_current_token() == ','):\n                self.eat(',')\n                self.compile_expression() \n        \n        self._indent -= 1\n        self.tag('/expressionList') \n        \n    def tag(self, type, token=None):\n        \"\"\"\n        Writes xml tag. 
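With a token, writes a one-line terminal element\n        such as <keyword> let </keyword>; without a token, writes an opening or closing\n        structural tag such as <statements>, indented to the current level.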
\n \"\"\"\n \n if token:\n xml = (' ' * self._indent \n + '<' + type + '> ' \n + token \n + ' </' + type + '>\\n') \n \n else:\n xml = ' ' * self._indent + '<' + type + '>\\n' \n\n self._tag_count += 1\n self._xml += xml\n \n self._file_open.write(xml) \n\n def eat(self, terminal):\n \"\"\"\n Advances to next token and checks that it is what was expected.\n Writes code to file and test_text, or closes write file if processing over. \n \"\"\" \n\n token = self._tokenize.get_current_token()\n token_type = self._tokenize.get_token_type()\n \n #when a token is a keyword or symbol, it should match the terminal or a terminal option, input as a string or list \n if token_type in ['keyword', 'symbol'] and token not in terminal:\n error = ('\\nERROR 1: ' + token + ' != ' + str(terminal) + ' at tag ' + str(self._tag_count) \n + ' from file: ' + self._file_name) \n self.print_error(error) \n \n #when a token is an integer, then the terminal input should include 'integerConstant' \n elif token_type == 'integerConstant' and 'integerConstant' not in terminal:\n error = ('\\nERROR 2: ' + str(terminal) + ' does not include ' + token_type + ' at tag ' + str(self._tag_count)\n + ' from file: ' + self._file_name) \n self.print_error(error) \n \n #when a token is a string, then the terminal input should include 'stringConstant' \n elif token_type == 'stringConstant' and 'stringConstant' not in terminal: \n error = ('\\nERROR 3: ' + str(terminal) + ' does not include ' + token_type + ' at tag ' + str(self._tag_count)\n + ' from file: ' + self._file_name) \n self.print_error(error) \n \n #when a token is an identifier, then the terminal or a terminal option, input as a string or list, \n #should match an item in ['className', 'subroutineName', 'varName']\n elif token_type == 'identifier' and not any(x in terminal for x in ['className', 'subroutineName', 'varName']): \n error = ('\\nERROR 4: ' + str(terminal) + ' has no match in [\\'className\\', \\'subroutineName\\', \\'varName\\'] '\n + 'at tag ' + str(self._tag_count) + ' from file: ' + self._file_name) \n self.print_error(error) \n \n #no errors, so get current token \n else:\n token = self._tokenize.get_current_token(xml=True) \n self.tag(token_type, token)\n \n #advance to next token, if there is one \n token = self._tokenize.get_next_token()\n\n def close(self):\n \"\"\"\n Closes output file.\n \"\"\"\n \n self._file_open.close() \n\n def print_error(self, error):\n \"\"\"\n Prints information for error and exits program. \n \"\"\"\n \n print('\\n' + self._xml)\n sys.exit(error) " }, { "alpha_fraction": 0.49887615442276, "alphanum_fraction": 0.5051910281181335, "avg_line_length": 34.41444778442383, "blob_id": "1f631e35ca8b3d87cd6cc3795b1e530ade28c0f8", "content_id": "9d4088266e9d7c92dd1ee4a69bbb253a5d7d2076", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9343, "license_type": "no_license", "max_line_length": 112, "num_lines": 263, "path": "/project8/code_writer.py", "repo_name": "ptdriscoll/nand2tetris", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \n\n\"\"\"\nThis class translates each VM command into assembly code. 
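For example, the VM command 'push constant 7' translates to the Hack assembly\nsequence @7, D=A, @SP, A=M, M=D, @SP, M=M+1.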
\n\"\"\"\n\nimport os\n\nif os.getcwd().endswith('VMTranslator'):\n from vm_translator import assembly_code as asm\n \nelse:\n import assembly_code as asm\n\n\nclass CodeWriter:\n \"\"\"\n Translates VM commands into assembly code.\n \"\"\"\n \n def __init__(self, full_path):\n \"\"\"\n Initializes virtual RAM for pointers and base address indices, \n and, if write_to_file=True, opens output file/stream and prepares to write into it.\n \"\"\"\n \n self._file_open = open(full_path, 'w') \n self._current_file_name = ''\n self._current_function_name = 'Sys'\n self._jump_count = 0 \n self._return_count = 0 \n \n self._dispatch = {\n 'C_ARITHMETIC': self.write_arithmetic,\n 'C_PUSH': self.write_push_pop,\n 'C_POP': self.write_push_pop,\n 'C_LABEL': self.write_flow,\n 'C_GOTO': self.write_flow,\n 'C_IF': self.write_flow,\n 'C_FUNCTION': self.write_function,\n 'C_RETURN': self.write_return,\n 'C_CALL': self.write_call \n }\n \n def __str__(self):\n to_print = ' Writing file: ' + self._current_file_name + '\\n'\n to_print += ' Current function: ' + self._current_function_name + '\\n'\n to_print += ' Jump count: ' + str(self._jump_count) + '\\n'\n to_print += ' Return count: ' + str(self._return_count) + '\\n'\n return to_print \n \n def set_file_name(self, file_name):\n \"\"\"\n Informs code writer that translation of a new VM file has started.\n \"\"\"\n \n self._current_file_name = file_name \n self._current_function_name = file_name #default until function declared \n self._jump_count = 0\n self._return_count = 0\n \n def set_function_name(self, function_name):\n \"\"\"\n Informs code writer that translation of a new VM file has started.\n \"\"\"\n \n self._current_function_name = function_name \n self._jump_count = 0 \n self._return_count = 0\n \n def write_init(self):\n \"\"\"\n Writes assembly code that initializes VM, or boostraps code. \n Code is placed at ROM[0] \n \"\"\" \n \n note = '// Initialize stack pointer to 256\\n'\n code = asm.assign_value_cmd('SP', '256') + '\\n' \n self._file_open.write(note + code) \n self.write_call('call', 'Sys.init', '0')\n \n def write_arithmetic(self, command):\n \"\"\"\n Writes assembly code that is a translation of given arithmetic command.\n \"\"\"\n \n note = '// ' + self._current_function_name + ': ' + command + '\\n'\n \n if command in ['add', 'sub', 'neg']:\n code = asm.math_cmd(asm.math_table[command])\n \n elif command in ['eq', 'gt', 'lt']:\n self._jump_count += 1\n jump = self._current_function_name + '$JUMP.' 
+ str(self._jump_count)\n code = asm.compare_cmd(asm.math_table[command], jump) \n\n #if command is an and, or, not \n else:\n code = asm.logic_cmd(asm.math_table[command]) \n\n self._file_open.write(note + code + '\\n') \n return note + code + '\\n' \n \n def write_push_pop(self, command, segment, index, write=True):\n \"\"\"\n Writes assembly code that is the translation of a given command, \n where command is either:\n - C_PUSH\n - C_POP \n \"\"\"\n \n note = '// ' + self._current_function_name + ': ' \n note += command + ' ' + segment + ' ' + index + '\\n'\n \n static = False\n \n if segment == 'static':\n symbol = self._current_file_name \n static = True \n \n else: \n symbol = asm.symbol_table[segment] \n \n if command == 'pop': \n code = asm.pop_cmd(symbol, index, static=static) \n \n #if command == 'push' \n else:\n code = asm.push_cmd(symbol, index, static=static) \n \n if write:\n self._file_open.write(note + code + '\\n') \n return note + code + '\\n' \n \n def write_flow(self, command, label, note=True, write=True):\n \"\"\"\n Writes assembly code that translates label command. \n \"\"\" \n\n if label:\n label = self._current_function_name + '$' + label \n \n #if no label, then it's a function, so declare as current_function_name \n else:\n label = self._current_function_name\n \n if note:\n note = ' // ' + command \n \n code = asm.flow_cmd(command, label, note=note) + '\\n'\n\n if write:\n self._file_open.write(code) \n return code \n \n def write_function(self, command, function_name, num_locals):\n \"\"\"\n Writes assembly code that translates function command. \n \"\"\" \n\n self.set_function_name(function_name)\n note = '// function ' + self._current_function_name + ' ' + num_locals + '\\n' \n \n code = self.write_flow('label', '', note=False, write=False) \n\n for x in range(int(num_locals)):\n code += self.write_push_pop('push', 'constant', '0', write=False)\n #code += self.write_push_pop('pop', 'local', str(x), write=False) \n\n self._file_open.write(note + code) \n return note + code \n\n def write_call(self, command, function_name, num_args):\n \"\"\"\n Writes assembly code that translates call command. num_args is a string. \n \"\"\"\n \n note = '// call ' + function_name + ' ' + num_args + '\\n' \n \n #push return-address (using label below)\n self._return_count += 1\n return_address_label = self._current_function_name + '$return.' 
+ str(self._return_count)\n code = asm.push_cmd('constant', return_address_label, note=' // push return-address\\n\\n') \n \n #save LCL of calling function\n code += asm.push_cmd('R1', '0', note=' // push LCL\\n\\n') \n \n #save ARG of calling function\n code += asm.push_cmd('R2', '0', note=' // push ARG\\n\\n') \n \n #save THIS of calling function\n code += asm.push_cmd('R3', '0', note=' // push THIS\\n\\n') \n \n #save THAT of calling function\n code += asm.push_cmd('R4', '0', note=' // push THAT\\n\\n') \n \n #reposition ARG (n=number of args)\n steps_back = 0 - 5 - int(num_args) \n code += asm.assign_cmd('ARG', 'SP', frame_steps=steps_back, note=' // ARG = SP-n-5\\n\\n')\n \n #reposition LCL\n code += asm.assign_cmd('LCL', 'SP', frame_steps=0, note=' // LCL = SP\\n\\n')\n \n #transfer control\n code += asm.flow_cmd('goto', function_name, note=' // goto f\\n') \n \n #label for return address \n code += asm.flow_cmd('label', return_address_label, note=' // (return-address)\\n') + '\\n' \n \n self._file_open.write(note + code) \n return note + code\n\n def write_return(self, command):\n \"\"\"\n Writes assembly code that translates return command. \n \"\"\" \n \n note = '// ' + command + ' from ' + self._current_function_name + '\\n' \n \n #save endFrame address as temp variable\n code = asm.assign_cmd('R14', 'LCL', note=' // endFrame = LCL\\n\\n') \n \n #save return address as another temp variable\n code += asm.assign_pointer_cmd('R15', 'R14', frame_steps=-5, note=' // retAddr = *(endFrame-5)\\n\\n')\n \n #Reposition return value for caller\n code += asm.pop_cmd('ARG', '0', note=' // *ARG = pop()\\n\\n')\n \n #Reposition SP of caller\n code += asm.assign_cmd('SP', 'ARG', frame_steps=1, note=' // SP = ARG+1\\n\\n')\n \n #Restore THAT of caller\n code += asm.assign_pointer_cmd('THAT', 'R14', frame_steps=-1, note=' // THAT = *(endFrame-1)\\n\\n')\n \n #Restore THIS of caller\n code += asm.assign_pointer_cmd('THIS', 'R14', frame_steps=-2, note=' // THIS = *(endFrame-2)\\n\\n')\n \n #Restore ARG of caller\n code += asm.assign_pointer_cmd('ARG', 'R14', frame_steps=-3, note=' // ARG = *(endFrame-3)\\n\\n')\n \n #Restore LCL of caller\n code += asm.assign_pointer_cmd('LCL', 'R14', frame_steps=-4, note=' // LCL = *(endFrame-4)\\n\\n') \n \n #Jump to return address in caller\n code += asm.flow_cmd('goto', 'R15', note=' // goto retAddr\\n')\n \n self._file_open.write(note + code) \n return note + code \n\n def write(self, command_type, args):\n \"\"\"\n Uses command_type passed from parser to call correct write method \n through self._dispatch mapping dictionary. \n \"\"\" \n\n return self._dispatch[command_type](*args) \n \n def close(self):\n \"\"\"\n Closes output file.\n \"\"\"\n \n self._file_open.close()\n \n \n \n \n \n" }, { "alpha_fraction": 0.5151095390319824, "alphanum_fraction": 0.5212087631225586, "avg_line_length": 22.58169937133789, "blob_id": "ccbcefb0537ab24640c432d041de884f1e58bcd6", "content_id": "c48525f502c74f72fd826208bd891d18e2055c14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3607, "license_type": "no_license", "max_line_length": 88, "num_lines": 153, "path": "/project11/vm_writer.py", "repo_name": "ptdriscoll/nand2tetris", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \n\n\"\"\"\nFunctions and data maps that produce virtual machine commands. \n\"\"\"\n\n#Maps a VM op command. 
\nops = {'+': 'add\\n', \n       '-': 'sub\\n', \n       '*': 'call Math.multiply 2\\n', \n       '/': 'call Math.divide 2\\n', \n       '&': 'and\\n', \n       '|': 'or\\n', \n       '<': 'lt\\n', \n       '>': 'gt\\n', \n       '=': 'eq\\n',\n       '~': 'not\\n', \n       'neg': 'neg\\n'}\n\n#Maps a VM unary op command. \nunary_ops = {'~': 'not\\n', '-': 'neg\\n'}\n\n#Maps specific terms to simple VM commands.\n#true is NOT mapped to push constant 1 followed by a neg (-1) as described \n#on page 233 in the book (2008 paperback). It is instead mapped to \n#push constant 0 followed by a not (~0). This matches, and makes testing compatible \n#with, the course's supplied JackCompiler (v 2.5). \nterms = {'true': 'push constant 0\\nnot\\n', \n         'false': 'push constant 0\\n', \n         'null': 'push constant 0\\n', \n         'this': 'push pointer 0\\n'}\n    \ndef write_push(segment, index):\n    \"\"\"\n    Writes a VM push command.\n    Accepts a segment:\n    - constant\n    - argument\n    - local\n    - static\n    - this\n    - that    \n    - pointer\n    - temp\n    Also accepts an index as an integer. \n    \"\"\"\n    \n    code = 'push ' + segment + ' ' + index + '\\n'\n    return code\n    \ndef write_pop(segment, index):\n    \"\"\"\n    Writes a VM pop command. \n    Accepts a segment:\n    - constant\n    - argument\n    - local\n    - static\n    - this\n    - that    \n    - pointer\n    - temp\n    Also accepts an index as an integer. \n    \"\"\"\n    \n    code = 'pop ' + segment + ' ' + index + '\\n'\n    return code\n    \ndef write_arithmetic(command, unary=False):\n    \"\"\"\n    Writes a VM arithmetic command. \n    Accepts an op command: +, -, *, /, &, |, <, >, = \n    Or a unary op command: -, ~ \n    \"\"\"\n    \n    if unary:\n        code = unary_ops[command]\n        \n    else:\n        code = ops[command]\n        \n    return code    \n    \ndef write_term(command):\n    \"\"\"\n    Writes simple terms: true, false, null, this \n    \"\"\"    \n    \n    code = terms[command]\n    return code    \n\ndef write_string(string):\n    \"\"\"\n    Writes a String constant using the OS constructor String.new(length) \n    and the OS method String.appendChar(nextChar). \n    \"\"\"    \n\n    code = 'push constant ' + str(len(string)) + '\\n'\n    code += 'call String.new 1\\n'\n    \n    for char in string:\n        code += 'push constant ' + str(ord(char)) + '\\n'\n        code += 'call String.appendChar 2\\n'\n        \n    return code    \n\ndef write_label(label):\n    \"\"\"\n    Writes a VM label command. Accepts label.\n    \"\"\"\n    \n    code = 'label ' + label + '\\n'\n    return code\n\ndef write_goto(label):\n    \"\"\"\n    Writes a VM goto command. Accepts label. \n    \"\"\"\n    \n    code = 'goto ' + label + '\\n'\n    return code\n\ndef write_if(label):\n    \"\"\"\n    Writes a VM if-goto command. Accepts label.\n    \"\"\"\n    \n    code = 'if-goto ' + label + '\\n'\n    return code\n\ndef write_call(class_name, func_name, num_args):\n    \"\"\"\n    Writes a VM call command. Accepts class name, function name and number of arguments.\n    \"\"\"\n    \n    code = 'call ' + class_name + '.' + func_name + ' ' + num_args + '\\n'\n    return code\n\ndef write_function(class_name, func_name, num_args):\n    \"\"\"\n    Writes a VM function command. Accepts class name, function name and number of local variables.\n    \"\"\"\n    \n    code = 'function ' + class_name + '.' + func_name + ' ' + num_args + '\\n'\n    return code\n\ndef write_return():\n    \"\"\"\n    Writes a VM return command. 
\n    \"\"\"\n    \n    code = 'return\\n'\n    return code" }, { "alpha_fraction": 0.41006097197532654, "alphanum_fraction": 0.41158536076545715, "avg_line_length": 35.27777862548828, "blob_id": "470a97b5811ce861ce390103ceedf01cfe59000f", "content_id": "e665c0fee05989c7aac6630f1e3b738b6c0f1baa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 656, "license_type": "no_license", "max_line_length": 88, "num_lines": 18, "path": "/project11/lexical_elements.py", "repo_name": "ptdriscoll/nand2tetris", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \n\n\"\"\"\nOut of five types of lexical elements, this includes the two that can be looked up in lists.\nAlso included here is a lookup dictionary for XML entity codes.\n\"\"\"\n\n\nkeywords = ['class', 'constructor', 'function', 'method', \n            'field', 'static', 'var', \n            'int', 'char', 'boolean', 'void', \n            'true', 'false', 'null', 'this', \n            'let', 'do', 'if', 'else', 'while', 'return'] \n    \nsymbols = ['{', '}', '(', ')', '[', ']', '.', ',', ';', \n           '+', '-', '*', '/', '&', '|', '<', '>', '=', '~' ]\n    \nxml_entities = {'<': '&lt;', '>': '&gt;', '\\\"': '&quot;', '&': '&amp;'} " }, { "alpha_fraction": 0.634647786617279, "alphanum_fraction": 0.635321855545044, "avg_line_length": 28.356435775756836, "blob_id": "3e7ab648f42149dc0ace25d78584fa711ab3a3ce", "content_id": "4ab26a52b4b9fe8843f1b7b4f0d47d6e1ead0e04", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2967, "license_type": "no_license", "max_line_length": 111, "num_lines": 101, "path": "/project7/VMTranslator.py", "repo_name": "ptdriscoll/nand2tetris", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \n\n\"\"\"\nThe VM Translator accepts a single command line parameter, Xxx or Xxx.vm, where either \nXxx is a directory containing one or more .vm files, or Xxx.vm is a file containing VM code.\n\nFROM vm_translator DIRECTORY:\n-prompt> python VMTranslator.py Xxx\n-prompt> python VMTranslator.py Xxx.vm\n\nFROM VMTranslator DIRECTORY one level up:\n-prompt> python -m vm_translator Xxx\n-prompt> python -m vm_translator Xxx.vm\n\nThe translator then translates the Xxx.vm file, or in case of a directory all .vm files. The result\nis always a single assembly-language file named Xxx.asm. \n\"\"\"\n\nimport os, sys, ntpath \n\nif os.getcwd().endswith('VMTranslator'):\n    from vm_translator import vm_parser\n    from vm_translator import code_writer\n    \nelse:\n    import vm_parser\n    import code_writer\n    \n\ndef translate_file(file, file_full_path, writer):\n    \"\"\"\n    Accepts the name and full path of a virtual machine code file, \n    parses each command, and writes its assembly translation through the shared code writer. 
\n    \"\"\"\n    \n    fname = file.replace('.vm', '')\n    writer.set_file_name(fname)\n    file_parser = vm_parser.Parser(file_full_path)\n    \n    while file_parser.has_more_commands():\n        file_parser.advance()\n        command_type = file_parser.command_type()\n        args = file_parser.get_args()\n        writer.write(command_type, args)\n    \ndef main():\n    \"\"\"\n    Checks command argument to see if it's a directory of virtual machine code files or just one file.\n    Writes translated assembly code to one file, whether working with a directory of vm files or just one file.\n    The translated file, with an .asm extension, is saved to the same directory where the vm file/s reside.\n    \"\"\"\n\n    #get directory or file from arg and, if on Windows, convert to back slashes\n    to_translate = sys.argv[1].strip() \n    to_translate = os.path.abspath(to_translate)  \n    \n    #add trailing slash to last directory if it's missing\n    if not to_translate.endswith('.vm'):\n        to_translate = ntpath.join(to_translate, '')   \n    \n    #get name to write to name.asm, using either name.vm or name directory  \n    path, tail = ntpath.split(to_translate)  \n    fname = tail or ntpath.basename(path)\n    \n    to_write = os.path.join(path, fname.replace('.vm', '') + '.asm')  \n    writer = code_writer.CodeWriter(to_write)\n    \n    if os.path.isdir(to_translate):\n        print('Translating .vm files in directory: ' + to_translate)\n        \n        for root, dirs, files in os.walk(to_translate):\n            for file in files:\n                if file.endswith('.vm'):\n                    file_full_path = os.path.join(root, file)\n                    translate_file(file, file_full_path, writer)\n        \n    else:\n        print('Translating file: ' + to_write)  \n        translate_file(fname, to_translate, writer)\n        \n    writer.close()\n    print('Translation completed')   \n    \n\nif __name__ == '__main__':\n    main() " }, { "alpha_fraction": 0.47230154275894165, "alphanum_fraction": 0.47344374656677246, "avg_line_length": 27.941667556762695, "blob_id": "38c6bfdee622cfc8bb456a6025db2c1e5064c47c", "content_id": "1e9b5b6ac442de326b1cca31222113ce6962d66b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3502, "license_type": "no_license", "max_line_length": 91, "num_lines": 120, "path": "/project7/code_writer.py", "repo_name": "ptdriscoll/nand2tetris", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \n\n\"\"\"\nThis class translates each VM command into assembly code. 
\n\"\"\"\n\nimport os\n\nif os.getcwd().endswith('VMTranslator'):\n from vm_translator import assembly_code as asm\n \nelse:\n import assembly_code as asm\n\n\nclass CodeWriter:\n \"\"\"\n Translates VM commands into assembly code.\n \"\"\"\n \n def __init__(self, full_path):\n \"\"\"\n Initializes virtual RAM for pointers and base address indices, \n and, if write_to_file=True, opens output file/stream and prepares to write into it.\n \"\"\"\n \n self._file_open = open(full_path, 'w') \n self._file_name = ''\n self._jump_count = 0 \n \n self._dispatch = {\n 'C_ARITHMETIC': self.write_arithmetic,\n 'C_PUSH': self.write_push_pop,\n 'C_POP': self.write_push_pop,\n 'C_LABEL': None,\n 'C_GOTO': None,\n 'C_IF': None,\n 'C_FUNCTION': None,\n 'C_RETURN': None,\n 'C_CALL': None, \n }\n \n def __str__(self):\n to_print = 'Writing file: ' + self._file_name + '\\n' \n return to_print \n \n def set_file_name(self, fname):\n \"\"\"\n Informs code writer that translation of a new VM file has started.\n \"\"\"\n \n self._file_name = fname \n self._jump_count = 0\n \n def write_arithmetic(self, command):\n \"\"\"\n Writes assembly code that is a translation of given arithmetic command.\n \"\"\"\n \n note = '// ' + self._file_name + ': ' + command + '\\n'\n \n if command in ['add', 'sub', 'neg']:\n code = asm.math_cmd(asm.math_table[command])\n \n elif command in ['eq', 'gt', 'lt']:\n self._jump_count += 1\n jump = self._file_name + '$JUMP.' + str(self._jump_count)\n code = asm.compare_cmd(asm.math_table[command], jump) \n\n #if command is an and, or, not \n else:\n code = asm.logic_cmd(asm.math_table[command]) \n\n self._file_open.write(note + code + '\\n') \n return note + code + '\\n' \n \n def write_push_pop(self, command, segment, index):\n \"\"\"\n Writes assembly code that is the translation of a given command, \n where command is either:\n - C_PUSH\n - C_POP \n \"\"\"\n \n note = '// ' + self._file_name + ': ' \n note += command + ' ' + segment + ' ' + index + '\\n'\n \n static = False\n \n if segment == 'static':\n symbol = self._file_name \n static = True \n \n else: \n symbol = asm.symbol_table[segment] \n \n if command == 'pop': \n code = asm.pop_cmd(symbol, index, static=static) \n \n #if command == 'push' \n else:\n code = asm.push_cmd(symbol, index, static=static) \n \n self._file_open.write(note + code + '\\n') \n return note + code + '\\n' \n\n def write(self, command_type, args):\n \"\"\"\n Uses command_type passed from parser to call correct write method \n through self._dispatch mapping dictionary. \n \"\"\" \n \n self._dispatch[command_type](*args) \n \n def close(self):\n \"\"\"\n Closes output file.\n \"\"\"\n \n self._file_open.close()\n \n \n \n \n \n" }, { "alpha_fraction": 0.45200398564338684, "alphanum_fraction": 0.45942091941833496, "avg_line_length": 21.75, "blob_id": "adff1cd5827200ff8138403c1a44930a3e7337f7", "content_id": "3d7117384f14b3324147444fea66022df200e535", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7011, "license_type": "no_license", "max_line_length": 110, "num_lines": 308, "path": "/project8/assembly_code.py", "repo_name": "ptdriscoll/nand2tetris", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \n\n\"\"\"\nDictionaries and functions that map virtual-machine commands to assembly-code snippets. 
\n\"\"\"\n\n\nsymbol_table = {\n 'local': 'LCL', \n 'constant': 'constant',\n 'argument': 'ARG', \n 'this': 'THIS', \n 'that': 'THAT', \n 'temp': 'R5',\n 'pointer': 'R3'\n}\n\nmath_table = {\n 'add': 'D+M',\n 'sub': 'M-D',\n 'neg': '-M',\n 'eq': 'EQ',\n 'gt': 'GT',\n 'lt': 'LT',\n 'and': '&',\n 'or': '|',\n 'not': '!' \n}\n\nnon_pointer_segments = ['R0','R1','R2','R3','R4','R5','R6','R7','R8','R9','R10','R11','R12','R13','R14','R15']\n\ndef pop_cmd(segment, index, static=False, note=''):\n \"\"\"\n Accepts memory segment and memory segment index,\n and returns assembly code for pop command.\n \"\"\"\n \n code = ''\n \n if static:\n code += '''@{segment}.{index}\n D=A \n '''.format(segment=segment, index=index)\n \n else: \n code += '''@{index}\n D=A\n @{segment}\n '''.format(index=index, segment=segment)\n \n if segment in non_pointer_segments:\n code += '''D=D+A\n ''' \n\n else:\n code += '''D=D+M\n ''' \n \n code += '''@R13\n M=D\n @SP\n M=M-1\n A=M\n D=M\n @R13\n A=M\n M=D''' \n \n code = code.replace(' ','') \n code += '{note}'.format(note=note) \n if not note:\n code += '\\n'\n \n return code\n \ndef push_cmd(segment, index, static=False, note=''):\n \"\"\"\n Accepts memory segment and memory segment index,\n and returns assembly code for push command. \n \"\"\"\n \n code = ''''''\n \n if static:\n code += '''@{segment}.{index}\n D=M\n '''.format(segment=segment, index=index) \n \n else: \n code += '''@{index}\n D=A\n '''.format(index=index)\n \n if segment != 'constant': \n code += '''@{segment}\n '''.format(segment=segment) \n \n if segment in non_pointer_segments:\n code += '''A=D+A\n D=M\n ''' \n \n else:\n code += '''A=D+M\n D=M\n ''' \n \n code += '''@SP\n A=M\n M=D\n @SP\n M=M+1'''\n \n code = code.replace(' ','') \n code += '{note}'.format(note=note) \n if not note:\n code += '\\n' \n\n return code\n\ndef math_cmd(command):\n \"\"\"\n Accepts math command string and returns assembly code for math operation, \n and returns assembly code:\n - command = D+M, M-D or -M\n - command string = add, sub or neg\n \"\"\"\n \n if command == '-M':\n code = '''@SP\n A=M-1\n M=-M\n ''' \n else:\n code = '''@SP\n M=M-1\n A=M\n D=M\n A=A-1\n M={command}\n '''.format(command=command)\n \n return code.replace(' ','') \n \ndef compare_cmd(command, jump):\n \"\"\"\n Accepts two string arguments and returns assembly code for comparison operation, \n and returns assembly code:\n - command = EQ, GT or LT\n - command string = eq, gt or lt\n - jump label includes incremented number each time a jump is used by CodeWriter instance \n \"\"\"\n \n code = '''@SP\n M=M-1\n A=M\n D=M\n A=A-1\n D=M-D\n M=-1\n @{jump}\n D;J{command}\n @SP\n A=M-1\n M=0\n ({jump})\n '''.format(command=command, \n jump=jump)\n \n return code.replace(' ','')\n \ndef logic_cmd(command):\n \"\"\"\n Accepts logic command string and returns assembly code for logical operation, \n and returns assembly code:\n - command = &, |, !\n - command string = and, or, not \n \"\"\"\n \n if command == '!':\n code = '''@SP\n A=M-1\n M=!M\n ''' \n else:\n code = '''@SP\n M=M-1\n A=M\n D=M\n A=A-1\n M=D{command}M\n '''.format(command=command)\n \n return code.replace(' ','') \n \ndef flow_cmd(command, label, note=''):\n \"\"\"\n Accepts program flow command string and returns assembly code.\n Program flow commands can be: label, goto or if-goto \n \"\"\"\n \n if command == 'label':\n code = '({label})\\n'.format(label=label)\n\n elif command == 'goto':\n code = '@{label}\\n'.format(label=label) \n \n #if label points to a temp variable, make it a 
pointer\n if label in ['R13','R14','R15']:\n code += 'A=M\\n'\n \n code += '0;JMP{note}\\n'.format(note=note) \n\n elif command == 'if-goto': \n code = '''@SP\n M=M-1\n A=M\n D=M\n ''' \n \n code = code.replace(' ','') \n code += '@{label}\\n'.format(label=label) \n code += 'D;JNE{note}\\n'.format(note=note) \n \n return code \n \ndef assign_cmd(save_to, save_from, frame_steps=None, note=''):\n \"\"\"\n Assigns value from one RAM location to another.\n Frame_steps are number of negative or positive steps away from save_from \n \"\"\"\n \n code = '''@{save_from}\n D=M\n '''.format(save_from=save_from)\n \n if frame_steps:\n if frame_steps < 0:\n plus_or_minus = '-'\n frame_steps = abs(frame_steps)\n else:\n plus_or_minus = '+' \n \n frame_steps = str(frame_steps) \n\n code += '''@{frame_steps}\n D=D{plus_or_minus}A\n '''.format(frame_steps=frame_steps, plus_or_minus=plus_or_minus) \n \n code += '''@{save_to}\n M=D'''.format(save_to=save_to) \n \n code = code.replace(' ','') \n code += '{note}'.format(note=note) \n \n return code \n \ndef assign_pointer_cmd(save_to, save_from, frame_steps=None, note=''):\n \"\"\"\n Assigns value from pointer to RAM location to another RAM location.\n Frame_steps are number of negative or positive steps away from save_from \n \"\"\"\n \n code = '@{save_from}\\n'.format(save_from=save_from)\n \n if frame_steps:\n if frame_steps < 0:\n plus_or_minus = '-'\n frame_steps = abs(frame_steps)\n else:\n plus_or_minus = '+' \n \n frame_steps = str(frame_steps)\n \n code += '''D=M\n @{frame_steps}\n A=D{plus_or_minus}A\n D=M\n '''.format(frame_steps=frame_steps, plus_or_minus=plus_or_minus) \n \n else:\n code += '''\n A=M\n D=M\n ''' \n \n code += '''@{save_to}\n M=D'''.format(save_to=save_to) \n \n code = code.replace(' ','') \n code += '{note}'.format(note=note) \n \n return code \n \ndef assign_value_cmd(save_to, save_from, note=''): \n \"\"\"\n Assigns value to RAM location. 
\n \"\"\" \n code = '''@{save_from}\n D=A\n @{save_to}\n M=D\n '''.format(save_from=save_from, save_to=save_to) \n \n code = code.replace(' ','') \n code += '{note}'.format(note=note) \n \n return code " }, { "alpha_fraction": 0.4566541612148285, "alphanum_fraction": 0.4586457312107086, "avg_line_length": 36.099998474121094, "blob_id": "261015e2253e9d18416ad824e27efd9126096242", "content_id": "6009015b280c38d91f6c737073b599df8a78fcc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8536, "license_type": "no_license", "max_line_length": 112, "num_lines": 230, "path": "/project11/tokenizer.py", "repo_name": "ptdriscoll/nand2tetris", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \n\n\"\"\"\nThis class, when applied to a text file containing Jack code, produces a list of tokens:\n - symbol\n - keyword\n - identifier\n - integer constant\n - string constant \nEach is printed in a separate line, with its classification recorded using XML tags.\n\"\"\"\n\nimport os, re\n\nif os.getcwd().endswith('Compiler'):\n from compiler import lexical_elements\n \nelse:\n import lexical_elements\n \n\nclass Tokenizer:\n \"\"\"\n The tokenizer removes all comments and white space from the input stream \n and breaks it into tokens, as specified in the Jack grammar.\n \"\"\"\n \n def __init__(self, full_path):\n \"\"\"\n Opens the input file/stream and gets ready to tokenize it.\n \"\"\"\n \n self._file_path = full_path\n with open(full_path) as f:\n self._content = f.read() \n self._cursor_index = 0 \n self._current_token = '' \n self._current_token_is_string = False\n self._state = 'code' #can be 'code', 'block comment', 'inline comment' or 'string' \n self._IDENTIFIER_REGEX = re.compile('^[A-Za-z0-9_-][A-Za-z0-9_-]*$') \n \n def __str__(self): \n to_print = ' File path: ' + self._file_path + '\\n'\n to_print += ' Cursor index: ' + str(self._cursor_index) + '\\n'\n to_print += ' Current token: ' + self._current_token + '\\n'\n to_print += 'Token is string: ' + str(self._current_token_is_string) + '\\n'\n to_print += ' State: ' + self._state + '\\n'\n #to_print += ' File: \\n' + self._content + '\\n'\n return to_print\n \n def has_more_to_process(self):\n \"\"\"\n Do we have more to process from input? Returns boolean. \n \"\"\" \n\n return self._cursor_index < len(self._content) \n\n def advance(self):\n \"\"\"\n Gets next token, if there is one, from input and makes it current token. Should only be called if \n has_more_to_process() is true. 
Initially there is no current token.\n \"\"\"\n \n self._current_token = ''\n self._current_token_is_string = False\n char = ''\n next_char = '' \n getting_token = True \n \n while(getting_token and self.has_more_to_process()): \n char = self._content[self._cursor_index]\n \n if self._state == 'block comment':\n if self._cursor_index + 1 < len(self._content):\n next_char = self._content[self._cursor_index + 1]\n \n if char == '*' and next_char == '/':\n self._state = 'code'\n self._cursor_index += 1 #advance to next_char\n \n elif self._state == 'inline comment':\n if char == '\\n':\n self._state = 'code'\n \n elif self._state == 'string':\n if char == '\"':\n self._current_token_is_string = True\n self._state = 'code'\n getting_token = False\n \n else:\n self._current_token += char \n \n #handles 'code' state \n else: \n\n #a space or newline ends a token - but ignore if no token stored yet in self._current_token\n if char in [' ', '\\n', '\\t']:\n if len(self._current_token) > 0: \n getting_token = False\n \n #a symbol ends a token - but is the token itself if no token stored yet in self._current_token\n elif char in lexical_elements.symbols: \n\n #get next character to see if '/' instead starts a comment \n if self._cursor_index + 1 < len(self._content):\n next_char = self._content[self._cursor_index + 1] \n \n if char == '/' and next_char in ['*', '/']: \n self._cursor_index += 1\n \n if next_char == '*':\n self._state = 'block comment' \n \n else:\n self._state = 'inline comment' \n \n #now we know we're handling a symbol and not a comment\n else:\n getting_token = False \n \n #if there is no token stored yet in self._current_token, then the symbol is a token \n if len(self._current_token) == 0:\n self._current_token += char \n \n #if there is a token stored in self._current_token, then symbol ends that token\n #return now so cursor is not advanced, so symbol can be picked up as token in next call \n else: \n return len(self._current_token) > 0 \n \n elif char == '\"':\n self._state = 'string'\n \n #this is another character in a token \n else:\n self._current_token += char \n \n self._cursor_index += 1\n \n return len(self._current_token) > 0 \n \n def get_token_type(self):\n \"\"\"\n Returns type of the current token:\n - keyword\n - symbol\n - identifier\n - integerConstant\n - stringConstant\n \"\"\"\n \n #self._current_token_is_string is first so that strings aren't marked as keywords\n if self._current_token_is_string: \n return 'stringConstant' \n \n elif self._current_token in lexical_elements.keywords:\n return 'keyword'\n \n elif self._current_token in lexical_elements.symbols:\n return 'symbol' \n \n elif self._current_token.isdigit():\n return 'integerConstant'\n \n elif self._IDENTIFIER_REGEX.match(self._current_token):\n return 'identifier' \n\n print('\\nCURRENT TOKEN IS NOT A VALID TYPE:', self._current_token, '\\n') \n \n def get_current_token(self, xml=False):\n \"\"\"\n Returns current token. \n \"\"\"\n \n if xml and self._current_token in lexical_elements.xml_entities:\n return lexical_elements.xml_entities[self._current_token]\n \n return self._current_token \n \n def get_next_token(self, xml=False):\n \"\"\"\n Checks to see if there is another token, and if there is, returns token. \n \"\"\"\n \n if self.has_more_to_process():\n token = self.advance() \n if token:\n return self.get_current_token(xml=xml)\n\n return None \n\n def cache_current_token(self):\n \"\"\"\n Returns current token, cursor index and states for caching. 
\n \"\"\" \n\n return self._current_token, self._cursor_index, self._current_token_is_string, self._state \n\n def reset_current_token(self, token, index, is_string, state):\n \"\"\"\n Sets or resets current token, cursor index and states to cached values. \n \"\"\" \n \n self._current_token = token\n self._cursor_index = index \n self._current_token_is_string = is_string\n self._state = state\n \n def write_xml(self):\n \"\"\"\n Writes output to XML file. \n \"\"\"\n \n token = ''\n tag = ''\n \n translation_file = self._file_path.replace('.jack','T.xml')\n with open(translation_file, 'w') as f:\n \n f.write('<tokens>\\n')\n while self.has_more_to_process():\n \n token = self.advance() \n if token: \n tag = ('<' + self.get_token_type() + '> ' \n + self.get_current_token(xml=True) \n + ' </' + self.get_token_type() + '>\\n') \n f.write(tag)\n \n f.write('</tokens>\\n') " }, { "alpha_fraction": 0.4900255799293518, "alphanum_fraction": 0.49207159876823425, "avg_line_length": 30, "blob_id": "b01fc2517968ac064c7df5653627cba756d30641", "content_id": "f7737f03bbddb378b49a7a94e652c07acba262d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3910, "license_type": "no_license", "max_line_length": 100, "num_lines": 125, "path": "/project7/vm_parser.py", "repo_name": "ptdriscoll/nand2tetris", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \n\n\"\"\"\nThis class handles parsing of a single .vm file\n\"\"\"\n\nimport os\n\nif os.getcwd().endswith('VMTranslator'):\n from vm_translator import assembly_code as asm\n \nelse:\n import assembly_code as asm\n \n\nclass Parser:\n \"\"\"\n Encapsulates access to input code. \n \n Reads a VM command, parses it and provides convenient access\n to its components. In addition, removes all white space and comments. \n \"\"\"\n \n def __init__(self, fname):\n \"\"\"\n Opens input file/stream and prepares to parse.\n \"\"\"\n \n self._file_path = fname \n with open(fname) as f:\n self._content = [line.strip() for line in f.readlines()]\n \n self._math_commands = list(asm.math_table.keys()) \n self._current_command = 0 \n self._next_command = 0\n self._current_args = []\n \n def __str__(self): \n to_print = ' Reading file: ' + self._file_path + '\\n'\n to_print += ' Current line number: ' + str(self._current_command) + '\\n'\n to_print += ' Current line: ' + self._content[self._current_command] + '\\n'\n to_print += ' Next command line: ' + str(self._next_command) + '\\n'\n to_print += ' Next command: ' + self._content[self._next_command] + '\\n' \n return to_print \n \n def has_more_commands(self):\n \"\"\"\n Checks to see if there are any more commands in input.\n Sets line index of next command, if there is one, and returns Boolean. \n \"\"\"\n \n #if current command has already run once, then increment by 1 \n start = 0\n if self._current_command:\n start = self._current_command + 1\n end = len(self._content) \n \n for index in range(start, end):\n line = self._content[index]\n \n if not len(line) or line.replace(' ','').startswith('//'):\n continue\n \n #if command in ['pop','push','add','sub','neg','eq','gt','lt','and','or','not']\n elif line.split()[0] in ['pop','push'] + self._math_commands: \n self._next_command = index \n return True \n\n else:\n continue \n\n return False \n \n def advance(self):\n \"\"\"\n Reads the next command from input, makes it the current command and runs parse_command(). \n Should be called only if has_more_commands() is true. 
Initially there is no current command.\n        \"\"\" \n        \n        self._current_command = self._next_command \n        self._parse_command() \n        return self._current_command \n\n    def _parse_command(self):\n        \"\"\"\n        Parses command into its parts and saves it as a list to self._current_args \n        \"\"\"\n        \n        self._current_args = self._content[self._current_command].split()\n        \n        \n    def command_type(self):\n        \"\"\"\n        Returns type of current command:\n        - C_ARITHMETIC\n        - C_PUSH, C_POP\n        - C_LABEL, C_GOTO\n        - C_IF\n        - C_FUNCTION\n        - C_RETURN\n        - C_CALL \n        \n        C_ARITHMETIC is returned for all arithmetic VM commands.\n        \"\"\" \n        \n        command = self._current_args[0]\n        \n        #if command in ['add','sub','neg','eq','gt','lt','and','or','not']\n        if command in self._math_commands: \n            return 'C_ARITHMETIC'\n        \n        elif command == 'pop':\n            return 'C_POP'\n        \n        elif command == 'push':\n            return 'C_PUSH'\n        \n        return None \n    \n    def get_args(self): \n        \"\"\"\n        Returns current args as list. \n        \"\"\" \n        \n        return self._current_args \n    \n    " }, { "alpha_fraction": 0.5882725715637207, "alphanum_fraction": 0.5892234444618225, "avg_line_length": 33.2717399597168, "blob_id": "1dfe292c512f0509a27d3bc98b2fae5a82b53d7e", "content_id": "76639ffe3ce7e6b83f5f1a8accc9ae4bc9f40005", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3155, "license_type": "no_license", "max_line_length": 116, "num_lines": 92, "path": "/project11/JackCompiler.py", "repo_name": "ptdriscoll/nand2tetris", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \n\n\"\"\"\nThe JackCompiler accepts a single command line parameter, Xxx or Xxx.jack, where either \nXxx is a directory containing one or more .jack files, or Xxx.jack is a file containing Jack code.\n\nFROM compiler DIRECTORY:\n-prompt> python JackCompiler.py Xxx\n-prompt> python JackCompiler.py Xxx.jack\n-prompt> python JackCompiler.py run_all\n\nFROM Compiler root DIRECTORY, one level up from compiler:\n-prompt> python -m compiler Xxx\n-prompt> python -m compiler Xxx.jack\n-prompt> python -m compiler run_all\n\nThe analyzer then parses the Xxx.jack file, or in case of a directory all .jack files. A corresponding Xxx.vm file \nof virtual machine commands for each .jack file is created and placed in the same directory as the .jack file/s. 
\n\"\"\"\n\nimport os, sys, ntpath \n\nif os.getcwd().endswith('Compiler'):\n    from compiler import compilation_engine\n    \nelse:\n    import compilation_engine\n    \n    \ndef parse_file(file, file_full_path, writer):\n    \"\"\"\n    Unused stub; per-file parsing is handled inside parse() below. \n    \"\"\"\n    \n    pass    \n    \n\ndef parse(arg):\n    \"\"\"\n    Checks command argument to see if it's a directory of .jack files or just one .jack file.\n    Writes corresponding .vm file for each .jack file, and saves to the same directory as the .jack file/s.\n    \"\"\"\n\n    #get directory or file from arg and, if on Windows, convert to back slashes\n    print('\\nUser input: \\n\\t' + arg)\n    to_translate = arg.strip() \n    to_translate = os.path.abspath(to_translate)\n    \n    if os.path.isdir(to_translate):\n        print('\\nParsing .jack files in directory: \\n\\t' + to_translate)\n        \n        for root, dirs, files in os.walk(to_translate):\n            for file in files:\n                if file.endswith('.jack'):\n                    file_full_path = os.path.join(root, file)\n                    file_to_write = file_full_path.replace('.jack', '.vm')\n                    compilation_engine.CompilationEngine(file_full_path, file_to_write)\n        \n    else:\n        print('\\nParsing file: \\n\\t' + to_translate)\n        to_write = to_translate.replace('.jack', '.vm')\n        compilation_engine.CompilationEngine(to_translate, to_write)\n        \n    print('\\nParsing completed')  \n    print('\\n----------------------------------------------------------------------')    \n    \ndef main():\n    \"\"\"\n    Checks whether arg is run_all, or a file or directory.\n    If run_all, then walks through data directory to parse all .jack files and directories. \n    \"\"\"\n    \n    if sys.argv[1] == 'run_all':\n        if os.getcwd().endswith('Compiler'):\n            start_directory = 'data/'\n        else:\n            start_directory = '../data/'   \n        \n        for root, dirs, files in os.walk(start_directory):\n            for file in files:\n                if file.endswith('.jack'):\n                    fname = os.path.join(root, file)\n                    parse(fname)    \n            for dir in dirs:\n                dname = os.path.join(root, dir)\n                parse(dname)    \n        \n    else:\n        parse(sys.argv[1])\n    \nif __name__ == '__main__':\n    main() " }, { "alpha_fraction": 0.7869042754173279, "alphanum_fraction": 0.7946532368659973, "avg_line_length": 60.46428680419922, "blob_id": "f8ab3f8937eb4e71f53d080c448d4cbd472e7577", "content_id": "3834c8b28ff97f5d1ba7f4ae8a38c5e34408f0e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5162, "license_type": "no_license", "max_line_length": 317, "num_lines": 84, "path": "/README.md", "repo_name": "ptdriscoll/nand2tetris", "src_encoding": "UTF-8", "text": "# From Nand to Tetris \n\nThese are assignment submissions for a two-part Coursera course called From Nand to Tetris. The course steps through how to build a simple general-purpose computer, from hardware switching circuits to high-level object-based software design.\n\nThe course's objective is to integrate key ideas involving algorithms, computer architecture, operating systems, compilers and software engineering into a unified framework. An accompanying book is called The Elements of Computing Systems: Building a Modern Computer from First Principles. See reference links below. \n\n<br>\n\n<img src=\"img/nand2tetris.png\" width=\"675\">\n\n### Project 1: Boolean Functions and Logic Gates\n\nA set of 15 elementary logic gates, designed from primitive Nand gates and implemented with a simple Hardware Description Language (HDL). 
The chipset is later used to construct the computer's Arithmetic Logic Unit (ALU) and memory system.\n\n### Project 2: Combinational Logic and the ALU\n\nA family of binary adders - chips designed to add numbers - and a simple Arithmetic Logic Unit (ALU). The ALU performs arithmetic and logical operations and is the computer's calculating brain. The ALU is later used to build the computer's Central Processing Unit (CPU).\n\n### Project 3: Sequential Logic and Memory\n\nA hierarchy of memory chips, from elementary flip-flop gates, to one-bit registers, to n-bit registers, to a family of Random Access Memory (RAM) chips. Unlike processing chips, based on combinational logic, these require clock-based sequential logic.\n\n### Project 4: Machine Language\n\nLow-level assembly programs written with the Hack machine language.\n\n### Project 5: Computer Architecture\n\nAn integration of previous chipsets into a general-purpose 16-bit computer called Hack, which can execute programs written in the Hack machine language. The computer includes a Central Processing Unit (CPU), which is integrated with the RAM.\n\n### Project 6: Assembler\n\nAn assembler that translates a symbolic machine language, also known as assembly, into binary 0s and 1s. The resulting binary code executes as-is on the Hack platform. Translation techniques include parsing, a symbol table, and macro-assembly. \n\nHere is the Python application, with unit tests: [Hack Assembler](https://github.com/ptdriscoll/hack-assembler)\n\n### Project 7: Virtual Machine I - Stack Arithmetic\n\nThe first half of a virtual machine, which translates virtual machine language into Hack assembly. Focused on stack-based arithmetic and memory access operations, this begins the back end of a program compiler. Modern software architectures such as Java and .NET use such two-tier compilers.\n\n### Project 8: Virtual Machine II - Program Control\n\nAn extension of the virtual machine translator, which adds flow control and subroutine call-and-return commands. This completes the virtual machine. \n\nHere is the full Python application for parts I and II, with unit tests: [Hack Virtual Machine Translator](https://github.com/ptdriscoll/hack-virtual-machine-translator)\n\n### Project 9: High-Level Language\n\nA Pong game, in which the player competes against the computer using several difficulty levels. It's written in Jack, a simple high-level object-oriented language with a Java-like syntax, which runs on the Hack platform. \n\n### Project 10: Compiler I - Syntax Analysis\n\nA syntax analyzer that parses Jack programs. Using recursive algorithms, the analyzer outputs an XML file that reflects the structure of translated programs. In the next project, the logic and code are morphed to produce virtual machine code. \n\n### Project 11: Compiler II - Code Generation\n\nA full-scale Jack compiler, which outputs virtual machine code that runs on the stack machine built in projects 7 and 8. \n\nHere is the full Python application for parts I and II, with unit tests and versioned files: [Jack Compiler](https://github.com/ptdriscoll/jack-compiler)\n\n### Project 12: Operating System\n\nEight classes that make up the operating system, written in Jack using a modular collection of algorithms. Some classical arithmetic and geometric algorithms come into play, as well as classical mathematical, memory management, string processing, and input/output algorithms. 
\n\nThe classes are: \n \n- Sys: Provides some execution-related services\n- Memory: Handles memory operations\n- Math: Provides basic mathematical operations\n- Screen: Handles graphic screen output\n- Output: Handles text based output\n- Keyboard: Handles user input from the keyboard\n- String: Implements the String type and basic string-related operations\n- Array: Defines the Array type and allows construction and disposal of arrays\n\n### References\n\n- Website: https://www.nand2tetris.org/software\n- Book: https://www.amazon.com/Elements-Computing-Systems-Building-Principles/dp/0262640686/ref=ed_oe_p\n- Course I: https://www.coursera.org/learn/build-a-computer\n- Course II: https://www.coursera.org/learn/nand2tetris2\n- Hack Assembler: https://github.com/ptdriscoll/hack-assembler\n- Hack Virtual Machine Translator: https://github.com/ptdriscoll/hack-virtual-machine-translator\n- Jack Compiler: https://github.com/ptdriscoll/jack-compiler" } ]
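The push_cmd helper in the project8/assembly_code.py entry above strips all spaces from its template and appends a trailing newline when no note is passed, so its output is easy to check by hand. A minimal sanity-check sketch, assuming that module is importable under its own name (the expected string below is hand-derived from the function body, not taken from the repo's tests):

```python
# Hand-checked sketch for push_cmd in project8/assembly_code.py (entry above).
# For 'constant' the @segment block is skipped, spaces are stripped, and a
# trailing newline is appended because note=''.
import assembly_code as asm  # assumes the module is on sys.path

expected = '@7\nD=A\n@SP\nA=M\nM=D\n@SP\nM=M+1\n'
assert asm.push_cmd('constant', '7') == expected
print(asm.push_cmd('constant', '7'))
```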
13
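Similarly, write_string in the project11/vm_writer.py entry above builds a string object one character at a time through the OS calls String.new and String.appendChar. A hand-worked usage example, again assuming the module is importable under its own name:

```python
# Worked example for write_string in project11/vm_writer.py (entry above).
# len('Hi') == 2, ord('H') == 72, ord('i') == 105, so the emitted VM code is:
import vm_writer  # assumes the module is on sys.path

print(vm_writer.write_string('Hi'))
# push constant 2
# call String.new 1
# push constant 72
# call String.appendChar 2
# push constant 105
# call String.appendChar 2
```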
amit-gupta-16/Hello-world
https://github.com/amit-gupta-16/Hello-world
66e9abab82582a785e63dae6d7d35f5ec811f838
be446a9e541fd80837c84688d2fd796dcb3776aa
14e1fe36c2db1302a269138ebb72dda5615554a1
refs/heads/master
2022-07-01T08:58:25.577610
2020-05-14T20:05:29
2020-05-14T20:05:29
264,003,485
1
0
null
2020-05-14T19:13:45
2020-05-14T19:45:20
2020-05-14T20:05:29
Python
[ { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 23, "blob_id": "41902c1a74a51cb54a062df62166e6e7c443e04a", "content_id": "e17ad68b7055893b554c6088fe9f876d600f2d20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 144, "license_type": "no_license", "max_line_length": 83, "num_lines": 6, "path": "/README.md", "repo_name": "amit-gupta-16/Hello-world", "src_encoding": "UTF-8", "text": "# Hello-world\nI am learning a lot about python,pygame,tkinter,django,flask,git,version controling\n\nMy name: Amit Gupta\nStay home: Yes\nStudy: Python\n" }, { "alpha_fraction": 0.6919642686843872, "alphanum_fraction": 0.7098214030265808, "avg_line_length": 36.33333206176758, "blob_id": "cbeed053969a8e603bb77e17a36498812357a0c4", "content_id": "04072c0584e0d194cb91767e21635b443a2015f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 224, "license_type": "no_license", "max_line_length": 59, "num_lines": 6, "path": "/sum.py", "repo_name": "amit-gupta-16/Hello-world", "src_encoding": "UTF-8", "text": "#Take input two numbers\nn1 = int(input(\"Enter first number: \"))\nn2 = int(input(\"Enter second number: \"))\n\nprint(\"The sum of first and second number is: \", n1 + n1)\n# use reliable variable name to make code clear and concise\n" }, { "alpha_fraction": 0.5970149040222168, "alphanum_fraction": 0.5970149040222168, "avg_line_length": 15.736842155456543, "blob_id": "606b0d60712cb81ec43ee3f032d0dd5135280bb1", "content_id": "6bb824cd4f78e3c0a8a1c462683ae425ccdce348", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 335, "license_type": "no_license", "max_line_length": 34, "num_lines": 19, "path": "/rough.py", "repo_name": "amit-gupta-16/Hello-world", "src_encoding": "UTF-8", "text": "from my_decorators import do_twice\r\n\r\n# @do_twice\r\n# def say_whee(name):\r\n# print(f\"whee! {name}\")\r\n\r\n# a = say_whee(\"karishma\")\r\n# b = say_whee(\"Amit\")\r\n# print(a,b)\r\n\r\n@do_twice\r\ndef return_greet(name):\r\n print(\"Creating greeting\")\r\n return f\"Hi {name}\"\r\n\r\na = return_greet(\"amit\")\r\nprint(a)\r\n\r\nprint(return_greet.__name__)" } ]
3
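The sum.py entry in the Hello-world record above reads two integers with int(input(...)), which raises ValueError on non-numeric input. A slightly hardened variant, shown here purely as an illustration and not as part of the repo:

```python
# Illustrative hardening of sum.py (not in the repo): int() raises ValueError
# on non-numeric input, so catch it and report instead of crashing.
try:
    n1 = int(input("Enter first number: "))
    n2 = int(input("Enter second number: "))
except ValueError:
    print("Please enter whole numbers only.")
else:
    print("The sum of first and second number is:", n1 + n2)
```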
FishermanZzhang/-offer
https://github.com/FishermanZzhang/-offer
7dfb4dd144790a5c3411118ef8e702e2b0086a25
ce1a41015f12df46135c3054df703563f682d590
5a3acae2856137c4fbc292d70ffc4af8bfe78e4a
refs/heads/master
2021-11-11T22:05:22.157513
2021-10-27T11:51:44
2021-10-27T11:51:44
62,554,675
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.38543516397476196, "alphanum_fraction": 0.394316166639328, "avg_line_length": 24.590909957885742, "blob_id": "e427b953cda3899d6736f0078962667d9f7216e1", "content_id": "953e5a18e1a96a009a6610112a3497acaf205f76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 563, "license_type": "no_license", "max_line_length": 82, "num_lines": 22, "path": "/剑指offer/41_和为s的两个数字.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "class solution41_1{\npublic:\n bool findNumbersWithSum(const vector<int>& nums, int sum, pair<int, int>& pp){\n int left = 0;\n int right = nums.size() - 1;\n while (left < right){\n int s = nums[left] + nums[right];\n if (s == sum){\n pp.first = nums[left];\n pp.second = nums[right];\n return true;\n }\n else if (s > sum){\n --right;\n }\n else{\n ++left;\n }\n }\n return false;\n }\n};\n" }, { "alpha_fraction": 0.529691219329834, "alphanum_fraction": 0.529691219329834, "avg_line_length": 13.517241477966309, "blob_id": "bbfe6b565cfe3cc5e8156ef2c03f5871a0350364", "content_id": "74cf13eb05b3c64b9ee004c8cca7cf6e9e3dffd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 421, "license_type": "no_license", "max_line_length": 28, "num_lines": 29, "path": "/剑指offer/21_包含min函数的栈.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "template<typename T>\nclass minStack{\npublic:\n\tminStack() : min_(INT_MAX){\n\t}\n\tvoid push(const T& t){\n\t\tif (min_ > t){\n\t\t\tstack_.push(min_);\n\t\t\tmin_ = t;\n\t\t}\n\t\tstack_.push(t);\n\t}\n\tconst T& top() const{\n\t\treturn stack_.top();\n\t}\n\tconst T& min() const{\n\t\treturn min_;\n\t}\n\tvoid pop(){\n\t\tif (min_ == stack_.top()){\n\t\t\tstack_.pop();\n\t\t\tmin_ = stack_.top();\t\t\t\n\t\t}\n\t\tstack_.pop();\n\t}\nprivate:\n\tT min_;\n\tstd::stack<T> stack_;\n};\n" }, { "alpha_fraction": 0.7948718070983887, "alphanum_fraction": 0.8205128312110901, "avg_line_length": 38, "blob_id": "858895b5a73af453ad07f54edb63b6b0170e245f", "content_id": "2d7d95355f46d5a5119ae182381ca35023233bd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 39, "license_type": "no_license", "max_line_length": 38, "num_lines": 1, "path": "/tf_model_speed/run.sh", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "CUDA_VISIBLE_DEVICES=0 python speed.py\n" }, { "alpha_fraction": 0.3343151807785034, "alphanum_fraction": 0.3483063280582428, "avg_line_length": 29.863636016845703, "blob_id": "bac03e724bb659afde5b504d3f27063c317ff382", "content_id": "6c1c9ee61f48984f800e125eefa90c9f2a88eed3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1358, "license_type": "no_license", "max_line_length": 66, "num_lines": 44, "path": "/剑指offer/36_数组中的逆序对.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "//hard\n//\nclass solution_36{\n public:\n int inversePairs(vector<int>& nums){\n return mergesort(nums, 0, nums.size() - 1);\n }\n private:\n int mergesort(vector<int>& nums, int start, int end){\n if (start < end){\n int mid = start + ((end - start) >> 1);\n int c1 = mergesort(nums, start, mid);\n int c2 = mergesort(nums, mid + 1, end);\n int c3 = merge(nums, start, mid, end);\n return c1 + c2 + c3;\n }\n return 0;\n }\n int merge(vector<int>& nums, int start, int mid, int end){\n int cnt = 0;\n vector<int> tmp(end - start + 1, 0);\n int i = 
start, j = mid + 1, m = mid, n = end;\n int k = 0;\n while (i <= m && j <= n){\n if (nums[j] < nums[i]){\n cnt += mid - i + 1;\n tmp[k++] = nums[j++];\n }\n else{\n tmp[k++] = nums[i++];\n }\n }\n while (i <= m){\n tmp[k++] = nums[i++];\n }\n while (j <= n){\n tmp[k++] = nums[j++];\n }\n for (int i = start; i <= end; ++i){\n nums[i] = tmp[i - start];\n }\n return cnt;\n }\n};\n" }, { "alpha_fraction": 0.34402331709861755, "alphanum_fraction": 0.3615160286426544, "avg_line_length": 23.5, "blob_id": "ad834a446f98a5162fda9c63e407505a10fcc547", "content_id": "07ad908c7165c5d7610c04d775c4208405159c85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 343, "license_type": "no_license", "max_line_length": 54, "num_lines": 14, "path": "/剑指offer/35_第一个只出现一次的字符.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "class solution{\n public:\n char fisrtNotPepeatingChar(const string& str){\n char nums[256] = { 0 };\n for (auto ch : str){\n ++nums[ch];\n }\n for (auto ch : str){\n if (nums[ch] == 1)\n return ch;\n }\n return 0;\n }\n};\n" }, { "alpha_fraction": 0.44789355993270874, "alphanum_fraction": 0.4523281455039978, "avg_line_length": 22.736841201782227, "blob_id": "d0cdc5f7eacfe51a5b7af9ba80e506045100e5b2", "content_id": "62addfac7bc7cfa0dafc043a852eb8d1571aa127", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 451, "license_type": "no_license", "max_line_length": 50, "num_lines": 19, "path": "/剑指offer/33_把数组排成最小的数.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "struct cmp{\n bool operator()(int a, int b){\n string stra = to_string(a);\n string strb = to_string(b);\n return stra + strb < strb + stra;\n }\n};\n\nclass solution33{\n public:\n void printMinNumber(vector<int>& nums){\n sort(nums.begin(), nums.end(), cmp());\n string res = \"\";\n for (auto n : nums){\n res += to_string(n);\n }\n cout << res;\n }\n};\n" }, { "alpha_fraction": 0.5957132577896118, "alphanum_fraction": 0.6045824289321899, "avg_line_length": 44.099998474121094, "blob_id": "b09d8e8cbad1ade484e9a336cc6f25a55ffbb27c", "content_id": "b866a8c0d604129e7c68519a2bc9e932500730d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1353, "license_type": "no_license", "max_line_length": 144, "num_lines": 30, "path": "/剑指offer/24_二叉搜素树的后序遍历序列.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "// no idea ,has glance it\nclass solution_24{\npublic:\n bool verifySequenceOfBST(vector<int>& sequence){\n return verifySequenceOfBST(sequence, 0, sequence.size() - 1);\n }\n bool verifySequenceOfBST(vector<int>& sequence, int start, int end){\n if (start >= end) return true;\n int rootval = sequence[end];\n int index = findFitstMoreThanToot(sequence, start, end - 1, rootval);\n bool flag = verifyMoreThanToot(sequence, index, end - 1, rootval);\n if (flag == false){\n return false;\n }\n flag = verifySequenceOfBST(sequence, start, index - 1);\n if (flag == false){\n return false;\n }\n flag = verifySequenceOfBST(sequence, index, end - 1);\n return flag;\n }\nprivate:\n int findFitstMoreThanToot(vector<int>& sequence, int start, int end, int val){\n return std::find_if(sequence.begin() + start, sequence.begin() + end + 1, [val](int x){return x > val; }) - sequence.begin();\n }\n bool verifyMoreThanToot(vector<int>& sequence, int start, int end, int val){\n //auto pos = std::find_if(sequence.begin() + 
start, sequence.begin() + end + 1, [val](int x){return x < val; });\n return std::find_if(sequence.begin() + start, sequence.begin() + end + 1, [val](int x){return x < val; }) == sequence.begin() + end + 1;\n }\n};\n" }, { "alpha_fraction": 0.5279069542884827, "alphanum_fraction": 0.5286821722984314, "avg_line_length": 40.6129035949707, "blob_id": "02278081ffcaf276c631181739beef53ee5e8e8c", "content_id": "a51897c9035927e3d1a3c26c0fdb87d07741c4bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1290, "license_type": "no_license", "max_line_length": 82, "num_lines": 31, "path": "/剑指offer/27_二叉搜素树与双向链表.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "// any binary tree to D-linklist\nclass solutionconvert{\n public:\n BinaryTreeNode* convertTree2DoubleLinkList(BinaryTreeNode* root){\n pair<BinaryTreeNode*, BinaryTreeNode*> headtail = convert(root);\n return headtail.first;\n }\n pair<BinaryTreeNode*, BinaryTreeNode*> convert(BinaryTreeNode* root){\n if (root == NULL) return make_pair(nullptr, nullptr);\n pair<BinaryTreeNode*, BinaryTreeNode*> htleft = convert(root->left);\n pair<BinaryTreeNode*, BinaryTreeNode*> htright = convert(root->right);\n if (htleft.second == NULL){\n if (htright.first == NULL){\n return make_pair(root, root);\n }\n root->left = htright.first;\n htright.first->right = root;\n return make_pair(root, htright.second);\n }\n else{\n htleft.second->left = root;\n root->right = htleft.second;\n if (htright.first == NULL){\n return make_pair(htleft.first, root);\n }\n root->left = htright.first;\n htright.first->right = root;\n return make_pair(htleft.first, htright.second);\n }\n }\n};\n" }, { "alpha_fraction": 0.5072363615036011, "alphanum_fraction": 0.5244658589363098, "avg_line_length": 18.092105865478516, "blob_id": "0e3c42ea288a38f430682653cf4c4d13ff8901d4", "content_id": "ea90db7c531a434e402aaae687a8e1e94b7ffdf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1451, "license_type": "no_license", "max_line_length": 41, "num_lines": 76, "path": "/剑指offer/07_用两个栈实现队列.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "#include <stack>\n#include <assert.h>\nusing std::stack;\n\ntemplate<typename T>\nclass myqueue{\n public:\n myqueue();\n ~myqueue();\n void push(const T& t);\n void pop();\n T& front();\n T& back();\n size_t size();\n bool empty();\n private:\n std::stack<T> stack1;\n std::stack<T> stack2;\n T back_;\n};\n\ntemplate<typename T>\nmyqueue<T>::myqueue(){}\n\ntemplate<typename T>\nmyqueue<T>::~myqueue(){};\n\ntemplate<typename T>\nvoid myqueue<T>::push(const T& t){\n stack1.push(t);\n}\n\ntemplate<typename T>\nvoid myqueue<T>::pop(){\n if (stack2.empty()){\n assert(!stack1.empty());\n back_ = stack1.top();\n while (!stack1.empty()){\n T tmp = stack1.top();\n stack1.pop();\n stack2.push(tmp);\n }\n }\n assert(!stack2.empty());\n stack2.pop();\n}\n\ntemplate<typename T>\nT& myqueue<T>::front(){\n if (stack2.empty()){\n assert(!stack1.empty());\n back_ = stack1.top();\n while (!stack1.empty()){\n T tmp = stack1.top();\n stack1.pop();\n stack2.push(tmp);\n }\n }\n return stack2.top();\n}\ntemplate<typename T>\nT& myqueue<T>::back(){\n if (!stack1.empty())\n return stack1.top();\n return back_;\n}\n\ntemplate<typename T>\nsize_t myqueue<T>::size(){\n return stack1.size() + stack2.size();\n}\n\ntemplate<typename T>\nbool myqueue<T>::empty(){\n return size() == 0;\n}\n" }, { "alpha_fraction": 
0.41887906193733215, "alphanum_fraction": 0.47492626309394836, "avg_line_length": 13.739130020141602, "blob_id": "539d6edcff0e7d8d955f898016c1f8d966c9d180", "content_id": "2afcf8bc1af7e5871b364c0ffd3f2fe3fd0087fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 339, "license_type": "no_license", "max_line_length": 46, "num_lines": 23, "path": "/剑指offer/10_二进制中1的个数.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "int numberOf1(int n){\n\tint cnt = 0;\n\twhile (n){\n\t\t++cnt;\n\t\tn &= (n - 1);\n\t}\n\treturn cnt;\n}\nint numberOf1_3(int n){\n\tint cnt = 0;\n\tfor (int i = 0; i < 32; ++i){\n\t\tcnt += static_cast<int>((n &(1 << i)) != 0);\n\t}\n\treturn cnt;\n}\nint numberOf1_2(int n){\n\tint cnt = 0;\n\tfor (int i = 0; i < 32; ++i){\n\t\tcnt += n & 1;\n\t\tn >>= 1;\n\t}\n\treturn cnt;\n}\n" }, { "alpha_fraction": 0.7032257914543152, "alphanum_fraction": 0.7032257914543152, "avg_line_length": 30, "blob_id": "5cbf231e52768e66abb6ed91c917e7e7facc2269", "content_id": "b4c9dd97946b4585b9da467911d83e8235dbb2c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 155, "license_type": "no_license", "max_line_length": 53, "num_lines": 5, "path": "/剑指offer/19_二叉树的镜像.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "void mirrorOfTree(TreeNode* root){\n\tif (root == NULL) return;\n\tstd::swap(root->left, root->right);\n\tmirrorOfTree(root->left), mirrorOfTree(root->right);\n}\n" }, { "alpha_fraction": 0.637417197227478, "alphanum_fraction": 0.637417197227478, "avg_line_length": 30.789474487304688, "blob_id": "5245df748cc9fb8b561496ed3cf57af259f60d78", "content_id": "2e1b66cdd8d124a079d8d715359a3326697903b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 604, "license_type": "no_license", "max_line_length": 88, "num_lines": 19, "path": "/剑指offer/18_树的子结构.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "bool hasSubTree(TreeNode* root, TreeNode* subroot){\n\tfunction<bool(TreeNode*, TreeNode*)> same = [&same](TreeNode* root, TreeNode* subroot){\n\t\tif (subroot == NULL) return true;\n\t\tif (root == NULL) return false;\n\t\treturn root->val == subroot->val && \\\n\t\t \t same(root->left, subroot->left) && \\\n\t\t\t same(root->right, subroot->right);\n\t};\n\tif (subroot == NULL) return true;\n\tif (root == NULL) return false;\n\tbool flag = false;\n\tif (root->val == subroot->val){\n\t\tflag = same(root, subroot);\n\t}\n\tif (!flag){\n\t\tflag = hasSubTree(root->left, subroot) || hasSubTree(root->right, subroot);\n\t}\n\treturn flag;\n}\n" }, { "alpha_fraction": 0.5224867463111877, "alphanum_fraction": 0.5264550447463989, "avg_line_length": 17.439023971557617, "blob_id": "bc5efd42dc51f94a989e7174e5af0a1f1161d30a", "content_id": "13e515dd708ebd125e501ff2cfff4da052f3c267", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 768, "license_type": "no_license", "max_line_length": 47, "num_lines": 41, "path": "/剑指offer/02_Singleton.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "//m1 饿汉\nclass Singleton{\n public:\n static Singleton* getSingleton();\n private:\n Singleton(){}\n private:\n static Singleton* sing_;\n};\nSingleton* Singleton:: sing_ = new Singleton();\n\n// m2 饿汉\nclass Singleton{\n public:\n static Singleton* getSingleton(){\n return &sing_;\n }\n private:\n 
Singleton(){}\n    private:\n        static Singleton sing_;\n\n};\nSingleton Singleton::sing_;\n\n// m3 lazy initialization (note: not thread-safe; needs a lock for concurrent use)\nclass Singleton{\n    public:\n        static Singleton* get(){\n            if (sing_ == NULL){\n                sing_ = new Singleton;\n            }\n            return sing_;\n        }\n    private:\n        Singleton(){}\n    private:\n        static Singleton* sing_ ;\n\n};\nSingleton* Singleton::sing_ = NULL;\n" }, { "alpha_fraction": 0.49166667461395264, "alphanum_fraction": 0.49166667461395264, "avg_line_length": 17.461538314819336, "blob_id": "88302492254d7e1ae30c74748331b3aa72965d2d", "content_id": "3dc1d83597a9b0c7b37f7cb9565996fb3e14f091", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 240, "license_type": "no_license", "max_line_length": 38, "num_lines": 13, "path": "/剑指offer/04_从尾到头打印链表.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "struct ListNode{\n    int data;\n    ListNode* next;\n};\n\nclass solution{\n    public:\n    void print(ListNode* head){\n        if(head == NULL) return ;\n        print(head->next);\n        cout << head->data << endl;\n    }\n};\n" }, { "alpha_fraction": 0.4418262243270874, "alphanum_fraction": 0.4639175236225128, "avg_line_length": 27.29166603088379, "blob_id": "7196dc0f03b2e351cf996dbda2d4ddf1f79e1aec", "content_id": "b3a375016fc4c59f07b8bf96f623e8d888950d98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 679, "license_type": "no_license", "max_line_length": 79, "num_lines": 24, "path": "/剑指offer/38_数字在排序数组中的出现的次数.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "class solution38{\npublic:\n    int getNumberOfK(vector<int>& nums, int k){\n        int left = 0;\n        int right = nums.size() - 1;\n        int index1 = binary_search(nums, left, right, k + .5);\n        int index2 = binary_search(nums, left, std::min(index1,right), k - .5);\n        return index1 - index2;\n    }\nprivate:\n    int binary_search(vector<int>& nums, int left, int right, float k){\n        int mid = -1;\n        while (left <= right){\n            mid = left + ((right - left) >> 1);\n            if (nums[mid] > k){\n                right = mid - 1;\n            }\n            else{\n                left = mid + 1;\n            }\n        }\n        return left;\n    }\n};\n" }, { "alpha_fraction": 0.2948490083217621, "alphanum_fraction": 0.307282418012619, "avg_line_length": 24.590909957885742, "blob_id": "1ef295de081f0b81d6fd2c7c3213bad0dcc85b73", "content_id": "6cf236500e73c57d1111e5a7cec62f6a19097e0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 563, "license_type": "no_license", "max_line_length": 69, "num_lines": 22, "path": "/剑指offer/40_数组中出现一次的数字.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "class solution40{\n    public:\n    pair<int, int> findTheTwoNumberAppearOnce(vector<int>& nums){\n        int tmp = 0;\n        for (auto num : nums){\n            tmp ^= num;\n        }\n        while (tmp & (tmp - 1)){\n            tmp &= tmp - 1;\n        }\n        int x = 0, y = 0;\n        for (auto num : nums){\n            if (num & tmp){\n                x ^= num;\n            }\n            else{\n                y ^= num;\n            }\n        }\n        return make_pair(x, y);\n    }\n};\n" }, { "alpha_fraction": 0.5907127261161804, "alphanum_fraction": 0.5928725600242615, "avg_line_length": 20.534883499145508, "blob_id": "f84680c4253b35e51048d3e8cef66ce271b37149", "content_id": "84a7ef67a560ba5dbcec30d595a4f0f4e5683654", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 942, "license_type": "no_license", "max_line_length": 65, "num_lines": 43, "path": "/剑指offer/13_在O1时间删除链表节点.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "void DeleteNode(ListNode** 
head, ListNode* pdeltenode){\n\tif (head == NULL || *head == NULL || pdeltenode == NULL) return;\n\tif ((*head)->next == NULL && (*head) == pdeltenode){\n\t\tdelete pdeltenode;\n\t\t*head = NULL;\n\t}\n\telse if(pdeltenode->next != NULL){\n\t\tpdeltenode->val = pdeltenode->next->val;\n\t\tListNode* p = pdeltenode->next;\n\t\tpdeltenode->next = p->next;\n\t\tdelete p;\n\t}\n\telse{\n\t\t// tail node: must traverse from the head to find the predecessor; see solution13 below\n\t}\n}\n\n\nclass solution13{\npublic:\n\tvoid DeleteNode(ListNode** head, ListNode* pdeltenode){\n\t\tif (head == NULL || (*head) == NULL || pdeltenode == NULL)\n\t\t\treturn;\n\t\tif ((*head)->next == NULL && pdeltenode == *head){\n\t\t\tdelete pdeltenode;\n\t\t\t*head = NULL;\n\t\t}\n\t\telse if (pdeltenode->next){\n\t\t\tauto q = pdeltenode->next;\n\t\t\tpdeltenode->val = q->val;\n\t\t\tpdeltenode->next = q->next;\n\t\t\tdelete q;\n\t\t}\n\t\telse{\n\t\t\tauto p = *head;\n\t\t\twhile (p->next != pdeltenode){\n\t\t\t\tp = p->next;\n\t\t\t}\n\t\t\tp->next = NULL;\n\t\t\tdelete pdeltenode;\n\t\t}\n\t}\n};\n" }, { "alpha_fraction": 0.5166944861412048, "alphanum_fraction": 0.5242069959640503, "avg_line_length": 38.93333435058594, "blob_id": "21d2149be1780eec61aa230fd185e2dbebf11ce9", "content_id": "7d33bb8e509bb2b0a8ffe3e4ad11647f6a2609cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1198, "license_type": "no_license", "max_line_length": 120, "num_lines": 30, "path": "/剑指offer/06_重建二叉树.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "struct BinaryTreeNode{\n    int data;\n    BinaryTreeNode* left;\n    BinaryTreeNode* right;\n    BinaryTreeNode(int val):data(val),left(NULL),right(NULL){}\n};\n\nclass solution{\n    public:\n    BinaryTreeNode* construct(vector<int>& preorder, vector<int>& inorder){\n        if(preorder.size() != inorder.size()){\n            return NULL;\n        }\n        BinaryTreeNode* root = gen(preorder, 0, preorder.size() - 1, \\\n                                    inorder, 0 , inorder.size() - 1);\n        return root;\n    }\n    BinaryTreeNode* gen(vector<int>& preorder, int start, int end,\n                        vector<int>& inorder, int left, int right){\n        if(start > end || left > right) return NULL;\n        BinaryTreeNode* root = new BinaryTreeNode(preorder[start]);\n        auto it = std::find(inorder.begin() + left, inorder.begin() + right + 1, preorder[start]) - inorder.begin();\n        root->left = gen(preorder, start + 1, it - left + start, \\\n                         inorder, left, it - 1);\n        root->right = gen(preorder,it + 1 - right + end, end, \\\n                          inorder, it + 1, right);\n        return root;\n    }\n\n};\n" }, { "alpha_fraction": 0.5853174328804016, "alphanum_fraction": 0.601190447807312, "avg_line_length": 20.913043975830078, "blob_id": "bb0bfa807ac4f9a71ce998b0c71b9cc313521fb8", "content_id": "ad881823b8caf393e05d77199c8f560183ec7d59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 504, "license_type": "no_license", "max_line_length": 54, "num_lines": 23, "path": "/utils/scripts/xsync", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\npcount=$#\nif((pcount==0)); then\necho no args;\nexit;\nfi\np1=$1\nfname=`basename $p1`\necho fname=$fname\npdir=`cd -P $(dirname $p1); pwd`\necho pdir=$pdir\nuser=`whoami`\n# change hadoop workers to yourself\nworkfile=/home/servers/hadoop-3.1.4/etc/hadoop/workers\nworks=`cat $workfile`\ncat $workfile | xargs -i ssh {} \"mkdir -p $pdir\"\nfor host in $works\ndo\n    echo ------------------- $host --------------\n    rsync -rvl $pdir/$fname $user@$host:$pdir\n    echo ----------------- rsync done 
--------------\ndone\n" }, { "alpha_fraction": 0.5645161271095276, "alphanum_fraction": 0.5689149498939514, "avg_line_length": 21.3125, "blob_id": "f47b3bfdef1d1c3463dda76fca9698d25dc35c1c", "content_id": "314a7e9bf5ab613a47cd2755f4a7fa9674cebcb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 682, "license_type": "no_license", "max_line_length": 56, "num_lines": 32, "path": "/剑指offer/01_赋值运算符函数.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "class CMyString{\n    public:\n        CMyString(char* pData = NULL);\n        CMyString(const CMyString& str);\n        ~CMyString();\n        CMyString& operator=(const CMyString& str);\n    private:\n        char* m_pData;\n\n};\n\n//m1: (not exception-safe: the old data is freed before new, which may throw)\nCMyString& CMyString::operator = (const CMyString& str){\n    if(this == &str){\n        return *this;\n    }\n    delete[] m_pData;\n    m_pData = NULL;\n    m_pData = new char[strlen(str.m_pData) + 1];\n    strcpy(m_pData, str.m_pData);\n    return *this;\n}\n\n//m2: copy-and-swap, exception-safe\nCMyString& CMyString::operator = (const CMyString& str){\n    if(this == &str){\n        return *this;\n    }\n    CMyString tmp(str);\n    std::swap(tmp.m_pData, this->m_pData);\n    return *this;\n}\n" }, { "alpha_fraction": 0.53751540184021, "alphanum_fraction": 0.5399754047393799, "avg_line_length": 26.100000381469727, "blob_id": "28c01f165eb905163fb62e189a99637af62c1e30", "content_id": "93b67f8aa0df56088922c7fe60f52a8288e98e1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 813, "license_type": "no_license", "max_line_length": 81, "num_lines": 30, "path": "/剑指offer/25_二叉树和为某一个值的路径.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "struct BinaryTreeNode{\n    int data;\n    BinaryTreeNode* left;\n    BinaryTreeNode* right;\n    BinaryTreeNode() :data(0), left(NULL), right(NULL){}\n\n};\ntypedef BinaryTreeNode* BinaryTree;\nclass solutionfindpath{\npublic:\n    void findpath(BinaryTree root, int sum){\n        findpath(root, 0, sum);\n    }\n    void findpath(BinaryTreeNode* pnode, int k, int sum){\n        if (pnode == NULL) return;\n        k += pnode->data;\n        record_.push_back(pnode->data);\n        if (k == sum && pnode->left == NULL && pnode->right == NULL){ // a valid path must end at a leaf\n            for (auto i : record_){\n                cout << i << \" \";\n            }\n            cout << endl;\n        }\n        findpath(pnode->left, k, sum);\n        findpath(pnode->right, k, sum);\n        record_.pop_back();\n    }\nprivate:\n    vector<int> record_;\n};\n" }, { "alpha_fraction": 0.3669467866420746, "alphanum_fraction": 0.4075630307197571, "avg_line_length": 31.454545974731445, "blob_id": "893f9247d05e8f83a974230968e287ab26104855", "content_id": "2fee0869264948dc339a0d8528380f5e8952294f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 714, "license_type": "no_license", "max_line_length": 90, "num_lines": 22, "path": "/剑指offer/34_丑数.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "class solution34{\n    public:\n    int getKthUglyNumber(int k){\n        vector<int> record(k + 1);\n        record[0] = 1;\n        int index2 = 0;\n        int index3 = 0;\n        int index5 = 0;\n        for (int i = 1; i <= k; ++i){\n            int t = _3min(record[index2] * 2, record[index3] * 3, record[index5] * 5);\n            if (t == record[index2] * 2) ++index2;\n            if (t == record[index3] * 3) ++index3;\n            if (t == record[index5] * 5) ++index5;\n            record[i] = t;\n        }\n        return record[k];\n    }\n    private:\n    int _3min(int x, int y, int z){\n        return std::min(x, std::min(y, z));\n    }\n};\n" }, { "alpha_fraction": 0.38085106015205383, "alphanum_fraction": 0.3914893567562103, "avg_line_length": 28.375, 
"blob_id": "20dec4006b1467ddc6770d38fe0dab44e6fbc126", "content_id": "a5daf9002e6f7ee9c723d473d501888e1a7b0927", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 470, "license_type": "no_license", "max_line_length": 56, "num_lines": 16, "path": "/剑指offer/03_二维数组中的查找.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "class Solution{\n public:\n bool has(vector<vector<int>>& nums, int target){\n int rows = nums.size();\n if(rows <= 0) return false;\n int cols = nums[0].size();\n int c = cols - 1;\n int r = 0;\n while(c >= 0 && r < rows){\n if(nums[r][c] == target) return true;\n if(nums[r][c] > target) --c;\n else ++r;\n }\n return false;\n }\n};\n" }, { "alpha_fraction": 0.3932346701622009, "alphanum_fraction": 0.4334038197994232, "avg_line_length": 16.518518447875977, "blob_id": "5a7137212b2e443bff0cd98bfbcf1602a898a98e", "content_id": "c4fe46331e0a77ac9adf256dbeab5141774185f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 473, "license_type": "no_license", "max_line_length": 38, "num_lines": 27, "path": "/剑指offer/12_打印1到最大的n位数.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "// no idea\nclass solution12{\npublic:\n\tvoid print1ToMaxofNDigits(int n){\n\t\tstring str(n, '\\0');\n\t\tfor (int i = 1; i <= n; ++i){\n\t\t\tgen(str, 0, i);\n\t\t\t//for (int j = 1; j < 10; ++j){\n\t\t\t\t//str[0] = '0' + j;\n\t\t\t\t//gen(str, 1, i);\n\t\t\t//}\n\t\t}\n\t}\n\tvoid gen(string& str, int pos,int n){\n\t\tif (pos == n){\n\t\t\t//cout << str;\n\t\t\tif (str[0] != '0'){\n\t\t\t\tcout << str << endl;\n\t\t\t}\n\t\t\treturn;\n\t\t}\n\t\tfor (int i = 0; i < 10; ++i){\n\t\t\tstr[pos] = '0' + i;\n\t\t\tgen(str, pos + 1, n);\n\t\t}\n\t}\n};\n" }, { "alpha_fraction": 0.5246376991271973, "alphanum_fraction": 0.5362318754196167, "avg_line_length": 18.16666603088379, "blob_id": "d1e6fd564e15a3ed238e8ab771ac6514be3ab0aa", "content_id": "b1d08078768ad1c90b5cbd674f124f1c86052fdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 345, "license_type": "no_license", "max_line_length": 55, "num_lines": 18, "path": "/剑指offer/22_栈的压入弹出序列.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "bool isPopOrder(vector<int>& pPush, vector<int>& pPop){\n\tassert(pPush.size() == pPop.size());\n\tstd::stack<int> st;\n\tst.push(INT_MIN);\n\tsize_t i = 0;\n\tsize_t j = 0;\n\twhile (1){\n\t\twhile (j < pPop.size() && st.top() == pPop[j]){\n\t\t\tst.pop();\n\t\t\t++j;\n\t\t}\n\t\tif (i < pPush.size())\n\t\t\tst.push(pPush[i++]);\n\t\telse\n\t\t\tbreak;\n\t}\n\treturn st.size() == 1;\n}\n" }, { "alpha_fraction": 0.5780114531517029, "alphanum_fraction": 0.7409178018569946, "avg_line_length": 200.07691955566406, "blob_id": "c85a29dcf81ec252fcf6a9b4fd71dde3796063a0", "content_id": "e1b9b11a1517b33976512374db18799dcc98b6a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5230, "license_type": "no_license", "max_line_length": 302, "num_lines": 26, "path": "/tf_model_speed/README.md", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "# Pre-trained Models\n\nModel | TF-Slim File | Checkpoint | Top-1 Accuracy| Top-5 Accuracy | Time |\n:----:|:------------:|:----------:|:-------:|:--------:|:--------:|\n[Inception 
V1](http://arxiv.org/abs/1409.4842v1)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v1.py)|[inception_v1_2016_08_28.tar.gz](http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz)|69.8|89.6|0.073565|\n[Inception V2](http://arxiv.org/abs/1502.03167)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v2.py)|[inception_v2_2016_08_28.tar.gz](http://download.tensorflow.org/models/inception_v2_2016_08_28.tar.gz)|73.9|91.8|0.101081 |\n[Inception V3](http://arxiv.org/abs/1512.00567)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v3.py)|[inception_v3_2016_08_28.tar.gz](http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz)|78.0|93.9|0.259961|\n[Inception V4](http://arxiv.org/abs/1602.07261)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v4.py)|[inception_v4_2016_09_09.tar.gz](http://download.tensorflow.org/models/inception_v4_2016_09_09.tar.gz)|80.2|95.2|0.494080|\n[Inception-ResNet-v2](http://arxiv.org/abs/1602.07261)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_resnet_v2.py)|[inception_resnet_v2_2016_08_30.tar.gz](http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz)|80.4|95.3|0.548969|\n[ResNet V1 50](https://arxiv.org/abs/1512.03385)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v1.py)|[resnet_v1_50_2016_08_28.tar.gz](http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz)|75.2|92.2|0.155431|\n[ResNet V1 101](https://arxiv.org/abs/1512.03385)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v1.py)|[resnet_v1_101_2016_08_28.tar.gz](http://download.tensorflow.org/models/resnet_v1_101_2016_08_28.tar.gz)|76.4|92.9|0.257862|\n[ResNet V1 152](https://arxiv.org/abs/1512.03385)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v1.py)|[resnet_v1_152_2016_08_28.tar.gz](http://download.tensorflow.org/models/resnet_v1_152_2016_08_28.tar.gz)|76.8|93.2|0.375427|\n[ResNet V2 50](https://arxiv.org/abs/1603.05027)^|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v2.py)|[resnet_v2_50_2017_04_14.tar.gz](http://download.tensorflow.org/models/resnet_v2_50_2017_04_14.tar.gz)|75.6|92.8|0.156192|\n[ResNet V2 101](https://arxiv.org/abs/1603.05027)^|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v2.py)|[resnet_v2_101_2017_04_14.tar.gz](http://download.tensorflow.org/models/resnet_v2_101_2017_04_14.tar.gz)|77.0|93.7|0.265960|\n[ResNet V2 152](https://arxiv.org/abs/1603.05027)^|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v2.py)|[resnet_v2_152_2017_04_14.tar.gz](http://download.tensorflow.org/models/resnet_v2_152_2017_04_14.tar.gz)|77.8|94.1|0.394746|\n[ResNet V2 200](https://arxiv.org/abs/1603.05027)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v2.py)|[TBA]()|79.9\\*|95.2\\*|0.568921|\n[VGG 16](http://arxiv.org/abs/1409.1556.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/vgg.py)|[vgg_16_2016_08_28.tar.gz](http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz)|71.5|89.8|0.234780|\n[VGG 
19](http://arxiv.org/abs/1409.1556.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/vgg.py)|[vgg_19_2016_08_28.tar.gz](http://download.tensorflow.org/models/vgg_19_2016_08_28.tar.gz)|71.1|89.8|0.272675|\n[MobileNet_v1_1.0_224](https://arxiv.org/pdf/1704.04861.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py)|[mobilenet_v1_1.0_224.tgz](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz)|70.9|89.9|0.067472|\n[MobileNet_v1_0.50_160](https://arxiv.org/pdf/1704.04861.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py)|[mobilenet_v1_0.50_160.tgz](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_160.tgz)|59.1|81.9|-|\n[MobileNet_v1_0.25_128](https://arxiv.org/pdf/1704.04861.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py)|[mobilenet_v1_0.25_128.tgz](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz)|41.5|66.3|-|\n[NASNet-A_Mobile_224](https://arxiv.org/abs/1707.07012)#|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/nasnet/nasnet.py)|[nasnet-a_mobile_04_10_2017.tar.gz](https://storage.googleapis.com/download.tensorflow.org/models/nasnet-a_mobile_04_10_2017.tar.gz)|74.0|91.6|0.129470|\n[NASNet-A_Large_331](https://arxiv.org/abs/1707.07012)#|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/nasnet/nasnet.py)|[nasnet-a_large_04_10_2017.tar.gz](https://storage.googleapis.com/download.tensorflow.org/models/nasnet-a_large_04_10_2017.tar.gz)|82.7|96.2|1.545247|\n\n* batch_size=64\n* Tesla P40 \n\n" }, { "alpha_fraction": 0.4930875599384308, "alphanum_fraction": 0.5368663668632507, "avg_line_length": 13.466666221618652, "blob_id": "b27acc44fe00b31132ebd13baa25721baedabddd", "content_id": "c6a1db1076062664500c3470111bf14d6fe2cd47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 434, "license_type": "no_license", "max_line_length": 44, "num_lines": 30, "path": "/剑指offer/17_合并两个排序的链表.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "struct ListNode {\n\tint val;\n\tListNode *next;\n\tListNode(int x) : val(x), next(NULL) {\n\t}\n};\n\nListNode* merge(ListNode* h1, ListNode* h2){\n\tListNode head(0);\n\tListNode* rear = &head;\n\twhile (h1 && h2){\n\t\tif (h1->val > h2->val){\n\t\t\trear->next = h2;\n\t\t\trear = h2;\n\t\t\th2 = h2->next;\n\t\t}\n\t\telse{\n\t\t\trear->next = h1;\n\t\t\trear = h1;\n\t\t\th1 = h1->next;\n\t\t}\n\t}\n\tif (h1){\n\t\trear->next = h1;\n\t}\n\tif (h2){\n\t\trear->next = h2;\n\t}\n\treturn head.next;\n}\n" }, { "alpha_fraction": 0.5768194198608398, "alphanum_fraction": 0.6145552396774292, "avg_line_length": 32.727272033691406, "blob_id": "e683f018ea1a77034f2f6f92f82921a8094930d5", "content_id": "d74eafc7a8e594b006d3212d9760a66594c5b357", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 371, "license_type": "no_license", "max_line_length": 76, "num_lines": 11, "path": "/剑指offer/11_数值的整数次方.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "double Power(double base, int exponent){\n\tfunction<double(double,int)> mypower = [&mypower](double base, unsigned x){\n\t\tif (x == 0) return 1.0;\n\t\tif (x == 1) return base;\n\t\tdouble r = mypower(base, x / 2);\n\t\treturn r * r * ((x & 1) ? 
base : 1.0);\n\t};\n\tif(base == 0.0) return .0;\n\tif (exponent < 0) return mypower(1.0 /base, -exponent);\n\treturn mypower(base, exponent);\n}\n" }, { "alpha_fraction": 0.5050251483917236, "alphanum_fraction": 0.5301507711410522, "avg_line_length": 19.947368621826172, "blob_id": "7d0f775ae74c90aca9af1d823a94d4934e68f44d", "content_id": "b41a0341ebdc23ed1714ac21ecda1912266215e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 398, "license_type": "no_license", "max_line_length": 47, "num_lines": 19, "path": "/剑指offer/42_翻转单词顺序.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "class solution42_1{\npublic:\n\tvoid reverseSentence(string& str){\n\t\treverse(str, 0, str.size() - 1);\n\t\tint start = 0;\n\t\t// treat the end of the string as a word boundary so the last word is reversed too\n\t\tfor (size_t i = 0; i <= str.size(); ++i){\n\t\t\tif (i == str.size() || str[i] == ' '){\n\t\t\t\treverse(str, start, i - 1);\n\t\t\t\tstart = i + 1;\n\t\t\t}\n\t\t}\n\t}\nprivate:\n\tvoid reverse(string& str, int start, int end){\n\t\twhile (start < end){\n\t\t\tstd::swap(str[start++], str[end--]);\n\t\t}\n\t}\n};\n" }, { "alpha_fraction": 0.5286144614219666, "alphanum_fraction": 0.5399096608161926, "avg_line_length": 18.97142791748047, "blob_id": "f86074909cdc220fcdfa8e2572e2b13640ed4969", "content_id": "7353e1a2126b627447ce3a0d6e5728c872f86909", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1328, "license_type": "no_license", "max_line_length": 64, "num_lines": 70, "path": "/剑指offer/08_旋转数组的最小数字.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "int min(vector<int>& nums){\n\tint left = 0;\n\tint right = nums.size() - 1;\n\twhile (left < right){\n\t\tint mid = left + (right - left) / 2;\n\t\tif (nums[mid] > nums[right]){\n\t\t\tleft = mid + 1;\n\t\t}\n\t\telse{\n\t\t\tright = mid;\n\t\t}\n\t}\n\treturn nums[left];\n}\n\nint min2(vector<int>& nums){\n\tint left = 0;\n\tint right = nums.size() - 1;\n\tauto linearsearch = [](vector<int>& nums, int left, int right){\n\t\tint mi = nums[left];\n\t\tfor (; left <= right; ++left){\n\t\t\tmi = std::min(mi, nums[left]);\n\t\t}\n\t\treturn mi;\n\t};\n\twhile (left < right){\n\t\tint mid = left + (right - left) / 2;\n\t\tif (nums[left] == nums[mid] && nums[mid] == nums[right]){\n\t\t\treturn linearsearch(nums, left, right);\n\t\t}\n\t\tif (nums[mid] > nums[right]){\n\t\t\tleft = mid + 1;\n\t\t}\n\t\telse{\n\t\t\tright = mid;\n\t\t}\n\t}\n\treturn nums[left];\n}\n\n\n\nclass solution8{\npublic:\n\tint minElement(vector<int>& nums){\n\t\tint left = 0;\n\t\tint right = nums.size() - 1;\n\t\tint mid = -1;\n\t\tauto getminbyliear = [nums](int start, int end){\n\t\t\tint m = INT_MAX;\n\t\t\tfor (int i = start; i <= end; ++i){\n\t\t\t\tm = std::min(m, nums[i]);\n\t\t\t}\n\t\t\treturn m;\n\t\t};\n\t\twhile (left <= right){\n\t\t\tmid = left + ((right - left) >> 1);\n\t\t\tif (nums[mid] == nums[right]){\n\t\t\t\treturn getminbyliear(left, right);\n\t\t\t}\n\t\t\telse if (nums[mid] > nums[right]){\n\t\t\t\tleft = mid + 1;\n\t\t\t}\n\t\t\telse{\n\t\t\t\tright = mid;\n\t\t\t}\n\t\t}\n\t\treturn nums[left];\n\t}\n};\n" }, { "alpha_fraction": 0.36054420471191406, "alphanum_fraction": 0.3741496503353119, "avg_line_length": 22.520000457763672, "blob_id": "7290a2181f880c18e801ba422a551d903bd1ebae", "content_id": "1adae2241f3c486f7758195a84c5a1d513653f48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 588, "license_type": "no_license", 
"max_line_length": 57, "num_lines": 25, "path": "/剑指offer/41_和为s的连续正序列.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "class solution41_2{\npublic:\n vector<pair<int, int>> findContinueSequence(int sum){\n vector<pair<int, int>> res;\n int left = 1;\n int index = 1;\n int t = 0;\n int pos = ((sum + 1) >> 1);\n while (index < sum){\n t += index;\n while (t > sum){\n t -= left;\n ++left;\n }\n if (t == sum){\n res.emplace_back(make_pair(left, index));\n t -= left;\n ++left;\n }\n ++index;\n }\n return res;\n }\nprivate:\n};\n" }, { "alpha_fraction": 0.4299814999103546, "alphanum_fraction": 0.43800124526023865, "avg_line_length": 26.474576950073242, "blob_id": "b044472c9616cfb909f5f9f3d5d24968369f8bfa", "content_id": "fa6e80ca7d660661023e50e948f4fce9267889ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1621, "license_type": "no_license", "max_line_length": 62, "num_lines": 59, "path": "/剑指offer/30_最小的k个数.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "//\nclass solutions30{\npublic:\n vector<int> getLeastKNumbers(vector<int>& nums, int k){\n if (nums.size() <= k) return nums;\n int index = -1;\n int start = 0;\n int end = nums.size() - 1;\n while (index != k){\n if (index > k) end = index - 1;\n else start = index + 1;\n index = partition(nums, start, end);\n }\n vector<int> res(nums.begin(), nums.begin() + k);\n return res;\n }\nprivate:\n int partition(vector<int>& nums, int left, int right){\n int t = nums[left];\n while (left < right){\n while (left < right && nums[right] > t) --right;\n if (left < right){\n std::swap(nums[left++], nums[right]);\n }\n while (left < right && nums[left] < t) ++left;\n if (left < right){\n std::swap(nums[left], nums[right--]);\n }\n }\n nums[left] = t;\n return left;\n }\n};\n\n\nclass solutions30_2{\npublic:\n vector<int> getLeastKNumbers(vector<int>& nums, int k){\n if (nums.size() <= k) return nums;\n priority_queue<int, vector<int>, std::less<int>> heap;\n for (int i = 0; i < k; ++i){\n heap.push(nums[i]);\n }\n for (int i = k; i < nums.size(); ++i){\n int x = heap.top();\n if (x > nums[i]){\n heap.pop();\n heap.push(nums[i]);\n }\n }\n vector<int> res(k);\n for (int i = k - 1; i >= 0; --i){\n res[i] = heap.top();\n heap.pop();\n }\n return res;\n }\nprivate:\n};\n" }, { "alpha_fraction": 0.4571428596973419, "alphanum_fraction": 0.4597402513027191, "avg_line_length": 16.5, "blob_id": "5e678dc21a37abb40d5d2f66345d2dd2ac62629d", "content_id": "39e0560b1e92193352623db3ab658b0fe167274e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 385, "license_type": "no_license", "max_line_length": 47, "num_lines": 22, "path": "/剑指offer/15_链表中倒数第k个结点.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "struct LinkNode{\n int data;\n LinkNode* next;\n};\nLinkNode* findKthOfTail(LinkNode* head, int k){\n if (k <= 0) return NULL;\n LinkNode h;\n h.next = head;\n LinkNode* p = &h;\n while (p && k--){\n p = p->next;\n }\n if (p == NULL){\n return NULL;\n }\n LinkNode* q = &h;\n while (p){\n p = p->next;\n q = q->next;\n }\n return q;\n}\n" }, { "alpha_fraction": 0.3821428716182709, "alphanum_fraction": 0.3883928656578064, "avg_line_length": 30.11111068725586, "blob_id": "5798085e2b30c36fffaaad06e88fe4044ee0681c", "content_id": "7aa0770d738c7b7f14f17ba599663de4e9c38f8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 
1134, "license_type": "no_license", "max_line_length": 56, "num_lines": 36, "path": "/剑指offer/04_替换空格.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "class solution{\n public:\n //length 字符长的总容量\n //length = INT_MAX;\n void replaceBlank(char* const str, int length){\n assert(length > 0);\n if(str == NULL) return;\n char* p = str;\n int numblack = 0;\n int numotherchars = 0;\n while(*p){\n if(*p == ' ') ++numblack;\n else ++numotherchars;\n ++p;\n }\n int numorigian = numblack + numotherchars;\n int numfinal = numblack * 3 + numotherchars;\n if(numfinal <= numorigian)\n return;\n assert(numfinal <= length);\n str[numfinal] = '\\0';\n --numorigian;\n --numfinal;\n while(numfinal > numorigian){\n if(str[numorigian] == ' '){\n str[numfinal--] = '0';\n str[numfinal--] = '2';\n str[numfinal--] = '%';\n }\n else{\n str[numfinal--] = str[numorigian];\n }\n --numorigian;\n }\n }\n};\n" }, { "alpha_fraction": 0.5445544719696045, "alphanum_fraction": 0.5544554591178894, "avg_line_length": 24.25, "blob_id": "f7e4d87f8cb5a7dadc455b66f6397e333e898b63", "content_id": "4ef6b1ed9bfabf3ce209d6d255249cfea0d1b2eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 404, "license_type": "no_license", "max_line_length": 65, "num_lines": 16, "path": "/剑指offer/14_调整数组顺序.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "void reorderOddEven(vector<int>& nums){\n int left = 0;\n int right = nums.size() - 1;\n while (left < right){\n while (left < right && !(nums[left] & 1)) ++left;\n while (left < right && (nums[right] & 1)) --right;\n if (left < right) std::swap(nums[left++], nums[right--]);\n }\n}\n\nvoid reorder(vector<int>& nums, bool (*f)(int)){\n\n}\nvoid reorderOddEven(vector<int>& nums){\n\n}\n" }, { "alpha_fraction": 0.6413994431495667, "alphanum_fraction": 0.6608357429504395, "avg_line_length": 26.078947067260742, "blob_id": "749932f3b2c214c4c4c5580d77708bdffbb6b4a7", "content_id": "dd0aa5a25889863a1994222d8b0b11fb1a7865ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1033, "license_type": "no_license", "max_line_length": 74, "num_lines": 38, "path": "/flask-example/Server.py", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/11/7 下午4:06\n# @File : Server.py\n# @Software: PyCharm\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom flask import request, Flask\nimport os\nimport argparse\nimport json\n\nparser = argparse.ArgumentParser(description='configure web.')\nparser.add_argument('--ip', type=str, default='0.0.0.0', help='server ip')\nparser.add_argument(\"--port\", default=8000, type=int, help='server port')\nargs = parser.parse_args()\n\napp = Flask(__name__)\n\n\[email protected](\"/simple\", methods=['POST'])\ndef simple():\n # print(\"args\", request.args)\n print(\"form\", request.form)\n print(\"data\", type(request.data), request.data)\n # print(type(request.data))\n print(\"json\", type(request.json), request.json)\n # file = request.files['file']\n # print(file.filename)\n info = {}\n return json.dumps(info, ensure_ascii=False, encoding='utf-8')\n\nif __name__ == \"__main__\":\n app.run(args.ip, port=args.port)\n" }, { "alpha_fraction": 0.46008870005607605, "alphanum_fraction": 0.4811529815196991, "avg_line_length": 24.05555534362793, "blob_id": 
"59611b16f3f5e27033badaabc2c9ef7c518099c7", "content_id": "b9bec4d5c98355749ffefa9fc2c62617c5aadae4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 902, "license_type": "no_license", "max_line_length": 68, "num_lines": 36, "path": "/剑指offer/37_两个链表的第一个公共结点.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "//\nclass solution37{\npublic:\n LinkNode* findFirstCommonNode(LinkNode* head1, LinkNode* head2){\n if (head1 == NULL || head2 == NULL) return NULL;\n int len1 = length(head1);\n int len2 = length(head2);\n LinkNode* longlink = NULL, *shortlink = NULL;\n if (len1 > len2){\n longlink = head1;\n shortlink = head2;\n }\n else{\n longlink = head2;\n shortlink = head1;\n }\n int diff = abs(len1 - len2);\n while (diff--){\n longlink = longlink->next;\n }\n while (longlink != shortlink){\n longlink = longlink->next;\n shortlink = shortlink->next;\n }\n return longlink;\n }\nprivate:\n int length(LinkNode* head){\n int cnt = 0;\n while (head){\n ++cnt;\n head = head->next;\n }\n return cnt;\n }\n};\n" }, { "alpha_fraction": 0.43668121099472046, "alphanum_fraction": 0.4912663698196411, "avg_line_length": 17.31999969482422, "blob_id": "f361e4dff6de13eff2ec2b337e2a649c3a75e8b4", "content_id": "22942d26c625423f77ffaf3749000cf405c192af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 458, "license_type": "no_license", "max_line_length": 44, "num_lines": 25, "path": "/剑指offer/09_斐波那契数列.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "unsigned fib(unsigned n){\n\tunsigned f_1 = 0;\n\tunsigned f = 1;\n\tfor (int i = 1; i <= n; ++i){\n\t\tf += f_1;\n\t\tf_1 = f - f_1;\n\t}\n\treturn f_1;\n}\n\nunsigned fib2(unsigned n, int k){\n\tif (n <= 0) return 0;\n\tif (n == 1) return 1;\n\tif (n == 2) return 2;\n\tvector<int> record(n + 1, 0);\n\trecord[0] = 0;\n\trecord[1] = 1;\n\trecord[2] = 2;\n\tfor (int i = 3; i <= n; ++i){\n\t\tfor (int j = 1; j <= std::min(i, k); ++j){\n\t\t\trecord[i] += record[i - j];\n\t\t}\n\t}\n\treturn record[n];\n}\n" }, { "alpha_fraction": 0.45995423197746277, "alphanum_fraction": 0.4731121361255646, "avg_line_length": 22.30666732788086, "blob_id": "0d01bed693290011d9d7d555255ed00629284a63", "content_id": "4d2ee9e64881f07b9fea3018a5b327655ca545ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1756, "license_type": "no_license", "max_line_length": 54, "num_lines": 75, "path": "/剑指offer/29_数组中出现次数超过一半的数字.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "// m1\nclass solutionsmoreThanHalf{\npublic:\n int moreThanHalf(vector<int>& nums){\n int t = INT_MIN;\n int cnt = 1;\n for (auto num : nums){\n cnt += static_cast<int>(num == t) * 2 - 1;\n if (cnt <= 0){\n t = num;\n cnt = 1;\n }\n }\n if (!checkMoreThanHalf(nums, t)){\n t = INT_MIN;\n }\n return t;\n }\nprivate:\n bool checkMoreThanHalf(vector<int>& nums, int t){\n size_t cnt = 0;\n for (auto num : nums){\n cnt += static_cast<size_t>(num == t);\n }\n return cnt >= nums.size() / 2;\n }\n};\n\n// m2\nclass solutions2{\npublic:\n int moreThanHalf(vector<int>& nums){\n int mid = (nums.size() >> 1) + (nums.size() & 1);\n int start = 0;\n int end = nums.size() - 1;\n int index = -1;\n while (index != mid){\n if (index > mid) end = index - 1;\n else start = index + 1;\n index = partion(nums, start, end);\n }\n int t = nums[mid];\n if (!checkMoreThanHalf(nums, t)){\n t = INT_MIN;\n }\n 
return t;\n    }\nprivate:\n    int partition(vector<int>& nums, int start, int end){\n        int t = nums[start];\n        while (start < end){\n            while (start < end && nums[end] > t) --end;\n            if (start < end){\n                std::swap(nums[start++], nums[end]);\n            }\n            while (start < end && nums[start] < t) ++start;\n            if (start < end){\n                std::swap(nums[start], nums[end--]);\n            }\n        }\n        nums[start] = t;\n        return start;\n    }\n    bool checkMoreThanHalf(vector<int>& nums, int t){\n        size_t cnt = 0;\n        for (auto num : nums){\n            cnt += static_cast<size_t>(num == t);\n        }\n        return cnt * 2 > nums.size();\n    }\n};\n\n\n\n// extension: element appearing more than 1/3 or 1/4 of the time\n" }, { "alpha_fraction": 0.7222222089767456, "alphanum_fraction": 0.7222222089767456, "avg_line_length": 17, "blob_id": "ef245a4f16e591d9f5783b04d97e09f555b9d2e8", "content_id": "42940f5bf31802f3f5437356313ea5a6489a2e1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 18, "license_type": "no_license", "max_line_length": 17, "num_lines": 1, "path": "/剑指offer/23_从上往下打印二叉树.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "// level traversal\n" }, { "alpha_fraction": 0.6399999856948853, "alphanum_fraction": 0.6555555462837219, "avg_line_length": 19.565217971801758, "blob_id": "888358d4af39e09657c7119f892cbc717c7ae84a", "content_id": "ae1baa3b5bb6bfda3ce5c035887da8b1be84a573", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 930, "license_type": "no_license", "max_line_length": 98, "num_lines": 46, "path": "/utils/cmd/cmd.md", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "### cmd \n\n#### Profiling\nvalgrind --tool=callgrind ${exe}\ngprof2dot -f callgrind callgrind.out.${id} |dot -Tpng -o report-${id}.png\n\nsudo perf record --call-graph dwarf -g ${exe}\nsudo perf report -i perf.data\n\n#### Packet capture\ntcpdump -i any port ${port} -nn -vv -w ${name}.pcap\nOpen the pcap file with Wireshark\n\n\n#### clang debugging\napt install lldb\n\n#### docker \n1. push \nfor i in $(docker images | grep harbor | awk '{printf(\"%s:%s\\n\",$1,$2)}'); do docker push $i; done\n2. remove\ndocker rmi -f $(docker images | grep \"vect\" | awk '{print $3}') \n3. pull\n\n4. rename\n```\n#!/bin/bash\n\nimages=(\nquay.io/prometheus/alertmanager:v0.21.0\n)\necho \"s\"\nfor imageName in ${images[@]};\ndo\nname=`echo $imageName| awk -F'/' '{print $NF}'`\necho $name\necho $imageName\ndocker pull $imageName\ndocker tag ${imageName} harbor.vectbase.com/library/${name} \ndone\n\n```\n\n#### git \n1. git submodule foreach git reset --hard\n2. 
git submodule update --init --recursive\n" }, { "alpha_fraction": 0.4606661796569824, "alphanum_fraction": 0.4769666790962219, "avg_line_length": 27.795917510986328, "blob_id": "4c77a154678ebba9772fb754aabd51d43c74e302", "content_id": "4325ecfbcb67c41c319bf16efe9608f6a02e9a29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1411, "license_type": "no_license", "max_line_length": 91, "num_lines": 49, "path": "/剑指offer/39_二叉树的深度.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "//question 1:\nclass solution39_1{\n public:\n int depth(BinaryTreeNode* root){\n if (root == NULL) return 0;\n int left = depth(root->left);\n int right = depth(root->right);\n return std::max(left, right) + 1;\n }\n};\n\n\n//question 2\nclass solution39_2{\n public:\n bool isBlance(BinaryTreeNode* root){\n if (root == NULL) return true;\n int left = depth(root->left);\n int right = depth(root->right);\n return abs(left - right) <= 1 && isBlance(root->left) && isBlance(root->right);\n }\n int depth(BinaryTreeNode* root){\n if (root == NULL) return 0;\n int left = depth(root->left);\n int right = depth(root->right);\n return std::max(left, right) + 1;\n }\n};\n\nclass solution39_2_2{\n public:\n bool isBlance(BinaryTreeNode* root){\n int d = 0;\n return isBlance(root, d);\n }\n bool isBlance(BinaryTreeNode* root, int& depth){\n if (root == NULL){\n depth = 0;\n return true;\n }\n int left = 0;\n int right = 0;\n if (isBlance(root->left, left) && isBlance(root->right, right)){\n depth = std::max(left, right) + 1;\n return abs(left - right) <= 1;\n }\n return false;\n }\n};\n" }, { "alpha_fraction": 0.5297619104385376, "alphanum_fraction": 0.569727897644043, "avg_line_length": 27, "blob_id": "450348b36d99156ea31048e294ddc7c4c3c924b5", "content_id": "2a6bd9aab48f7a397b97c705dc32a5f90d18b0df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1186, "license_type": "no_license", "max_line_length": 78, "num_lines": 42, "path": "/flask-example/Client.py", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/11/7 下午4:06\n# @File : Client.py\n# @Software: PyCharm\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport requests\nimport os\nimport numpy as np\nimport json\nimport base64\n\ndata = {\"data\": u\"中国人\"}\nwith open(\"image.jpg\") as f:\n img = f.read()\n\nimg = base64.encodestring(img)\ndata['img'] = img\nfiles = {'file': (\"image.jpg\", open(\"image.jpg\"), 'application/octet-stream')}\nfiles = {'file': open(\"image.jpg\")}\ndata = json.dumps(data)\nheaders = {'Content-Type': 'application/json'}\n# headers = {'Content-Type': 'multipart/form-data'}\n# r = requests.post(\"http://0.0.0.0:8000/simple\",\n# data=data,\n# json=jsondata,\n# files=files)\nr = requests.post(\"http://0.0.0.0:8000/simple\",\n headers=headers,\n data=data)\n# r = requests.post(\"http://0.0.0.0:8000/simple\",\n# data=data,\n# files=files)\n# r = requests.post(\"http://0.0.0.0:8000/simple\",\n# data=data,\n# files=files)\ntext = r.text\nprint(text)\n" }, { "alpha_fraction": 0.5087719559669495, "alphanum_fraction": 0.5236842036247253, "avg_line_length": 16.538461685180664, "blob_id": "39eed1b52d850403fb6c4a9457efffbf48b82f10", "content_id": "86edf6cd5fa5b0e9446ef86e9cede79d74153a93", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "C++", "length_bytes": 1140, "license_type": "no_license", "max_line_length": 41, "num_lines": 65, "path": "/剑指offer/07_用两个队列实现一个栈.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "//stack\ntemplate<typename T>\nclass mystack{\n public:\n mystack();\n ~mystack();\n T& top();\n void push(const T& t);\n void pop();\n size_t size();\n bool empty();\n private:\n queue<T> que1;\n queue<T> que2;\n};\ntemplate<typename T>\nmystack<T>::mystack(){}\n\ntemplate<typename T>\nmystack<T>::~mystack(){}\n\ntemplate<typename T>\nvoid mystack<T>::push(const T& t){\n if (!que2.empty()){\n que2.push(t);\n }\n else{\n que1.push(t);\n }\n}\n\ntemplate<typename T>\nvoid mystack<T>::pop(){\n queue<T>* quehave = &que1;\n queue<T>* queempty = &que2;\n if (!que2.empty()){\n quehave = &que2;\n queempty = &que1;\n }\n while (quehave->size() > 1){\n queempty->push(quehave->front());\n quehave->pop();\n }\n quehave->pop();\n}\n\ntemplate<typename T>\nT& mystack<T>::top(){\n if (!que2.empty()){\n return que2.back();\n }\n else{\n return que1.back();\n }\n}\n\ntemplate<typename T>\nsize_t mystack<T>::size(){\n return que1.size() + que2.size();\n}\n\ntemplate<typename T>\nbool mystack<T>::empty(){\n return size() == 0;\n}\n" }, { "alpha_fraction": 0.59375, "alphanum_fraction": 0.65625, "avg_line_length": 31, "blob_id": "6a87947809fcd0211c18f5c8451d7b745460772c", "content_id": "998e0ec4d6d99fb74d2ca24e3a0d6e257a02310e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 128, "license_type": "no_license", "max_line_length": 96, "num_lines": 4, "path": "/flask-example/README.md", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "# flask-simple\n\n# curl example\n`curl -H \"Content-Type:application/json\" -X POST -'{\"data\":\"data\"}' http://0.0.0.0:8000/simple`\n" }, { "alpha_fraction": 0.6141079068183899, "alphanum_fraction": 0.6265560388565063, "avg_line_length": 23.100000381469727, "blob_id": "25ba2bee92c636a292b2a0ada493969038ffc0f5", "content_id": "24b8b6bb054fe317155687c3c78e77d1daa8a242", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 241, "license_type": "no_license", "max_line_length": 48, "num_lines": 10, "path": "/剑指offer/42_左旋字符串.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "class solution42_2{\npublic:\n\tvoid leftRotateString(string& str, unsigned n){\n\t\tn %= str.size();\n\t\tstd::reverse(str.begin(), str.begin() + n);\n\t\tstd::reverse(str.begin() + n, str.end());\n\t\tstd::reverse(str.begin(), str.end());\n\t}\nprivate:\n};\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 17, "blob_id": "3824503c606409167f18c07d2e496d6630ce899d", "content_id": "8e5dab5102fae414a9e212d95718a54a040b3cc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 18, "license_type": "no_license", "max_line_length": 17, "num_lines": 1, "path": "/剑指offer/26_复杂链表的赋值.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "// do ti sometime\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 9, "blob_id": "026b2413992c0f1abcccc0f6bab9752978bc0c5b", "content_id": "4161c8b02107ef906b5f40da9184fcee5fe2bc1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 22, "license_type": "no_license", 
"max_line_length": 9, "num_lines": 1, "path": "/剑指offer/31_连续子数组的最大和.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "// 连续子序列和\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 9, "blob_id": "c90affb47bdb59dcf61a8c1ad70b1f00c5292055", "content_id": "b28d8f5a97c3ef9a9da14a086a6508a87c04cb5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 10, "license_type": "no_license", "max_line_length": 9, "num_lines": 1, "path": "/剑指offer/32_从1到n整数中1出现的次数.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "//no idea\n" }, { "alpha_fraction": 0.515418529510498, "alphanum_fraction": 0.515418529510498, "avg_line_length": 19.636363983154297, "blob_id": "71421c57a41757d674bca1601e3b05e4e4ef07d2", "content_id": "6d16e0322fe36ac22dea598d4d3c8baf2195721a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 227, "license_type": "no_license", "max_line_length": 38, "num_lines": 11, "path": "/剑指offer/16_反转链表.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "LinkNode* reverseList(LinkNode* head){\n LinkNode h;\n h.next = NULL;\n while (head){\n LinkNode* tmp = head->next;\n head->next = h.next;\n h.next = head;\n head = tmp;\n }\n return h.next;\n}\n" }, { "alpha_fraction": 0.40183761715888977, "alphanum_fraction": 0.40971311926841736, "avg_line_length": 27.06842041015625, "blob_id": "38c88aa55cf402be645506d1a2342c5c024dba7e", "content_id": "f19d7f4435f67631c0209d7e9b9e48722f0f4e44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5333, "license_type": "no_license", "max_line_length": 67, "num_lines": 190, "path": "/剑指offer/sort.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "#include<iostream>\n#include<string>\n#include<functional>\n#include<memory>\n#include<map>\n#include<unordered_map>\n#include<direct.h>\n#include<queue>\n#include<stack>\n#include<fstream>\n#include<time.h>\n\nusing namespace std;\n\nclass solution_sort{\n public:\n virtual void sort(vector<int>& nums) = 0;\n};\n\nclass solution_mergesort : public solution_sort{\n public:\n void sort(vector<int>& nums){\n mergesort(nums, 0, nums.size() - 1);\n }\n private:\n void mergesort(vector<int>& nums, int start, int end){\n if (start < end){\n int mid = start + ((end - start) >> 1);\n mergesort(nums, start, mid);\n mergesort(nums, mid + 1, end);\n merge(nums, start, mid, end);\n }\n }\n void merge(vector<int>& nums, int start, int mid, int end){\n vector<int> tmp(end - start + 1, 0);\n int i = start, j = mid + 1, m = mid, n = end;\n int k = 0;\n while (i <= m && j <= n){\n if (nums[i] < nums[j]){\n tmp[k++] = nums[i++];\n }\n else{\n tmp[k++] = nums[j++];\n }\n }\n while (i <= m){\n tmp[k++] = nums[i++];\n }\n while (j <= n){\n tmp[k++] = nums[j++];\n }\n for (int i = start; i <= end; ++i){\n nums[i] = tmp[i - start];\n }\n }\n};\n\nclass solution_quicksort : public solution_sort{\n public:\n void sort(vector<int>& nums){\n quicksort(nums, 0, nums.size() - 1);\n }\n private:\n void quicksort(vector<int>& nums, int start, int end){\n if (start < end){\n int index = partition(nums, start, end);\n quicksort(nums, start, index - 1);\n quicksort(nums, index + 1, end);\n }\n }\n int partition(vector<int>& nums, int start, int end){\n int t = nums[start];\n while (start < end){\n while (start < end && nums[end] > t) --end;\n if 
(start < end){\n nums[start++] = nums[end];\n }\n while (start < end && nums[start] <= t) ++start;\n if (start < end){\n nums[end--] = nums[start];\n }\n }\n nums[start] = t;\n return start;\n }\n};\n\nclass solution_heapsort : public solution_sort{\n public:\n void sort(vector<int>& nums){\n heap_sort(nums);\n }\n private:\n void adjustHead(vector<int>& nums, int i, int length){\n if (i <= (length - 1) /2 ){\n int left = 2 * i + 1;\n int right = 2 * i + 2;\n int max_pos = i;\n if (left < length && nums[left] > nums[max_pos]){\n max_pos = left;\n }\n if (right < length && nums[right] > nums[max_pos]){\n max_pos = right;\n }\n if (max_pos != i){\n std::swap(nums[max_pos], nums[i]);\n adjustHead(nums, max_pos, length);\n }\n }\n }\n void adjustHead2(vector<int>& nums, int i, int length){\n while (i <= (length - 1) / 2){\n int left = 2 * i + 1;\n int right = 2 * i + 2;\n int max_pos = i;\n if (left < length && nums[left] > nums[max_pos]){\n max_pos = left;\n }\n if (right < length && nums[right] > nums[max_pos]){\n max_pos = right;\n }\n if (max_pos != i){\n std::swap(nums[max_pos], nums[i]);\n i = max_pos;\n }\n else{\n break;\n }\n }\n }\n void buildHeap(vector<int>& nums){\n for (int i = nums.size() / 2 - 1; i >= 0; --i){\n adjustHead2(nums, i, nums.size());\n }\n }\n void heap_sort(vector<int>& nums){\n buildHeap(nums);\n for (int i = nums.size() - 1; i > 0; --i){\n std::swap(nums[0], nums[i]);\n adjustHead2(nums, 0, i);\n }\n }\n};\n\n\n\nint main(){\n const int N = ((1 << 15) - 1);\n vector<int> se(N, 0);\n int i = 0;\n std::generate(se.begin(), se.end(), [&i](){return i++; });\n std::random_shuffle(se.begin(), se.end());\n\n auto print = [](vector<int>& nums){\n for (auto n : nums){\n cout << n << \" \";\n }\n cout << endl;\n\n };\n cout << \"shuffle\" << endl;\n //print(se);\n\n time_t st ,ed;\n\n\n // quick sort\n std::shared_ptr<solution_sort> sm(new solution_quicksort);\n st = clock();\n sm->sort(se);\n ed = clock();\n cout << \"quick sort: use \" << (ed - st) << \" ms\" << endl;\n //print(se);\n\n std::random_shuffle(se.begin(), se.end());\n cout << \"shuffle\" << endl;\n //print(se);\n\n //merge sort\n sm.reset(new solution_mergesort);\n st = clock();\n sm->sort(se);\n ed = clock();\n cout << \"merge sort: use \" << (ed - st) << \" ms\" << endl;\n //print(se);\n\n\n system(\"pause\");\n return 0;\n}\n" }, { "alpha_fraction": 0.4774226248264313, "alphanum_fraction": 0.5134449601173401, "avg_line_length": 31.850000381469727, "blob_id": "3cd0ad7dd802f7e9bb963299a06b29aeab419a07", "content_id": "9c9ed3444e7074c10d7dad9f0ff752715e1869a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1971, "license_type": "no_license", "max_line_length": 81, "num_lines": 60, "path": "/tf_model_speed/speed.py", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nfrom nets import nets_factory\nimport glog as logging\nimport time\n\n\ndef tf_model_speed():\n num_classes = 1000\n weight_decay = 0.0001\n is_training = False\n batch_size = 64\n test_num = 10\n model_names = [\n 'inception_v1', 'inception_v2', 'inception_v3', 'inception_v4',\n 'inception_resnet_v2',\n 'resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152', 'resnet_v1_200',\n 'resnet_v2_50', 'resnet_v2_101', 'resnet_v2_152', 'resnet_v2_200',\n 'vgg_16', 'vgg_19',\n 'alexnet_v2',\n 'mobilenet_v1', 'mobilenet_v1_075','mobilenet_v1_050','mobilenet_v1_025',\n 'nasnet_mobile', 'nasnet_large',\n ]\n 
for model_name in model_names:\n logging.info(model_name)\n with tf.Graph().as_default():\n model_fn = nets_factory.get_network_fn(\n model_name,\n num_classes=num_classes,\n weight_decay=weight_decay,\n is_training=is_training)\n default_img_size = model_fn.default_image_size\n inputs = tf.random_normal(\n dtype=tf.float32,\n shape=[batch_size, default_img_size, default_img_size, 3])\n logits, _ = model_fn(\n inputs, )\n # for v in tf.trainable_variables():\n # logging.info(v.op.name)\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n sess.run(logits)\n start = time.time()\n for _ in range(test_num):\n sess.run(logits)\n end = time.time()\n logging.info(\"%s--default_image_size: %d--time: %f\" %\n (model_name, default_img_size,\n 1. * (end - start) / test_num))\n\n\ndef main(_):\n tf_model_speed()\n\n\nif __name__ == '__main__':\n tf.app.run()\n" }, { "alpha_fraction": 0.5438596606254578, "alphanum_fraction": 0.5497075915336609, "avg_line_length": 22.586206436157227, "blob_id": "12b7951114478532fa6cbb04e940004e37bac29e", "content_id": "435adda2806d22226a0122242bfb195cf36a75ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 684, "license_type": "no_license", "max_line_length": 56, "num_lines": 29, "path": "/剑指offer/08_quicksort.cpp", "repo_name": "FishermanZzhang/-offer", "src_encoding": "UTF-8", "text": "class quicksort{\n\tpublic:\n\t\tint partiton(vector<int>& nums, int start, int end){\n\t\t\tint t = nums[start];;\n\t\t\twhile (start < end){\n\t\t\t\twhile (start < end && nums[end] > t)\n\t\t\t\t\t--end;\n\t\t\t\tif (start < end){\n\t\t\t\t\tnums[start++] = nums[end];\n\t\t\t\t}\n\t\t\t\twhile (start < end && nums[start] < t)\n\t\t\t\t\t++start;\n\t\t\t\tif (start < end)\n\t\t\t\t\tnums[end--] = nums[start];\n\t\t\t}\n\t\t\tnums[start] = t;\n\t\t\treturn start;\n\t\t}\n\t\tvoid quicksort(vector<int>& nums, int start, int end){\n\t\t\tif (start < end){\n\t\t\t\tint poi = partiton(nums, start, end);\n\t\t\t\tquicksort(nums, start, poi - 1);\n\t\t\t\tquicksort(nums, poi + 1, end);\n\t\t\t}\n\t\t}\n\t\tvoid quicksort(vector<int>& nums){\n\t\t\tquicksort(nums, 0, nums.size() - 1);\n\t\t}\n};\n" } ]
53
rti/poodle-backend-django
https://github.com/rti/poodle-backend-django
80e7f41ae6857c8bdebecc6fbf75693f63486189
8905e8003c8a6b809f4b5749e84745a6536b7f52
23b399e28c74eb357aa38ec1f7c68403416116f8
refs/heads/main
2023-07-15T18:38:47.900211
2021-01-25T15:45:40
2021-01-25T15:45:40
325,505,401
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5849524736404419, "alphanum_fraction": 0.5874328017234802, "avg_line_length": 29.237499237060547, "blob_id": "02224c494adc2242c197fad1e14fa3f43cd51f62", "content_id": "86d2b84d28360ed0912dbcaebc36b14b9020f4b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2419, "license_type": "no_license", "max_line_length": 79, "num_lines": 80, "path": "/app/models.py", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass Query(models.Model):\n name = models.CharField(max_length=512)\n\n def __str__(self):\n return self.name\n\n def choices(self):\n return [choice for option in self.options.all()\n for choice in option.choices.all()]\n\n class Meta:\n verbose_name_plural = \"Queries\"\n\n\nclass Option(models.Model):\n begin_date = models.DateField()\n begin_time = models.TimeField(blank=True, null=True)\n end_date = models.DateField(blank=True, null=True)\n end_time = models.TimeField(blank=True, null=True)\n query = models.ForeignKey(\n Query, related_name='options', on_delete=models.CASCADE)\n\n def time_str(time):\n if time:\n return time.strftime('%H:%M')\n return None\n\n def begin_time_short(self):\n return Option.time_str(self.begin_time)\n\n def end_time_short(self):\n return Option.time_str(self.end_time)\n\n # TODO: appending the Query name here is only required to identify\n # Options in the admin form. Find a way to only append the Query name there\n def __str__(self):\n result = str(self.begin_date)\n if self.begin_time:\n result += ' ' + str(self.begin_time_short())\n if self.end_date or self.end_time:\n result += ' -'\n if self.end_date:\n result += ' ' + str(self.end_date)\n if self.end_time:\n result += ' ' + str(self.end_time_short())\n return '%s (%s)' % (result, str(self.query))\n\n\nclass Attendee(models.Model):\n name = models.CharField(max_length=64)\n\n def __str__(self):\n return self.name\n\n\nclass Status(models.TextChoices):\n YES = 'Y', 'Yes'\n NO = 'N', 'No'\n MAYBE = 'M', 'Maybe'\n\n\nclass Choice(models.Model):\n attendee = models.ForeignKey(\n Attendee, related_name='choices', on_delete=models.CASCADE)\n option = models.ForeignKey(\n Option, related_name='choices', on_delete=models.CASCADE)\n status = models.CharField(max_length=1, choices=Status.choices)\n\n def __str__(self):\n return (self.attendee.name + '\\'s choice for \"' +\n self.option.query.name + '\": ' +\n str(self.option.begin_date) + ' ' + str(self.status) + '')\n\n class Meta:\n constraints = [\n models.UniqueConstraint(\n fields=['attendee', 'option'], name='unique_choice')]\n" }, { "alpha_fraction": 0.7568517327308655, "alphanum_fraction": 0.7596626877784729, "avg_line_length": 20.560606002807617, "blob_id": "c447974e6156e7e6a9f1895851f23c20e18d84c8", "content_id": "92b02d42456855461e0ef1ef4dfcd12000d69052", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 1423, "license_type": "no_license", "max_line_length": 77, "num_lines": 66, "path": "/Dockerfile", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "# thanks to https://blog.bitsacm.in/django-on-docker/\nFROM python:slim as fundamental\n\n# entrypoint.sh is using netcat to wait for db to start up\nRUN apt-get update && apt-get install -y netcat \\\n && apt-get remove --purge --auto-remove -y \\\n && rm -rf /var/lib/apt/lists/*\n\n# create an app user in the app group\nRUN useradd --user-group --create-home --no-log-init 
--shell /bin/bash django\n\n# project's src home directory\nENV PROJECT_HOME=/home/django/project\n\n# create required directories\nRUN mkdir -p $PROJECT_HOME/static\n\n# cd to working dir\nWORKDIR $PROJECT_HOME\n\n# get the pip requirements file\nCOPY requirements.txt .\n\n# install python dependencies\nRUN pip install --upgrade pip\nRUN pip install -r requirements.txt\n\n# start the entrypoint script\nENTRYPOINT [\"./django-entrypoint.sh\"]\n\n#\n# development build target\n#\nFROM fundamental as development\n\n# setup python for development\nENV PYTHONUNBUFFERED 1\nENV PYTHONDONTWRITEBYTECODE 1\n\n# setup django for development\nENV DJANGO_DEBUG True\n\n#\n# production build target\n#\nFROM fundamental as production\n\n# setup python for production\nENV PYTHONBUFFERED 1\nENV PYTHONWRITEBYTECODE 1\n\n# setup django for production\nENV DJANGO_DEBUG False\n\n# copy src into container\nCOPY app app\nCOPY project project\nCOPY manage.py .\nCOPY django-entrypoint.sh .\nRUN chmod a+x django-entrypoint.sh\n\n# adjust ownership\nRUN chown -R django:django .\n\n# drop privs\nUSER django:django\n" }, { "alpha_fraction": 0.6903765797615051, "alphanum_fraction": 0.6903765797615051, "avg_line_length": 27.058822631835938, "blob_id": "9abe19c283edb207ff1a67f807233942cab5fdc3", "content_id": "e017a19d0515e69dc25835c42d5d12eb0a180be5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 478, "license_type": "no_license", "max_line_length": 71, "num_lines": 17, "path": "/nginx/Dockerfile", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "FROM nginx:stable\n\nRUN apt-get update && apt-get install -y inotify-tools \\\n    && apt-get remove --purge --auto-remove -y \\\n    && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/nginx.list\n\nCOPY ssl.conf /etc/nginx/conf.d/\nCOPY default.conf /etc/nginx/conf.d/\n\nCOPY nginx-init-ssl.sh /\nRUN chmod a+x /nginx-init-ssl.sh\n\nCOPY nginx-start-watch-certs.sh /\nRUN chmod a+x /nginx-start-watch-certs.sh\n\nENTRYPOINT [\"/nginx-init-ssl.sh\"]\nCMD [\"/nginx-start-watch-certs.sh\"]\n\n" }, { "alpha_fraction": 0.5931097269058228, "alphanum_fraction": 0.6195703148841858, "avg_line_length": 51.41594314575195, "blob_id": "6082121aaf534daf7383942ac2a1602cfe28ae03", "content_id": "4fad345a0989248b2cd93179fbf1fdfc6d34d879", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36167, "license_type": "no_license", "max_line_length": 149, "num_lines": 690, "path": "/app/tests.py", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "from datetime import date, time\nfrom django.contrib.auth.models import User\nfrom django.db import utils\nfrom django.test import TestCase\nfrom re import match\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom app.models import Query, Option, Attendee, Choice\n\n\nclass ModelRelationsTest(TestCase):\n    def setUp(self):\n        self.query = Query.objects.create(name='When can we meet?')\n        self.options = [\n            Option.objects.create(begin_date='2021-01-01', query=self.query),\n            Option.objects.create(begin_date='2021-01-02', query=self.query),\n            Option.objects.create(begin_date='2021-01-03', query=self.query), ]\n        self.attendees = [\n            Attendee.objects.create(name='Alisa'),\n            Attendee.objects.create(name='Asisa'), ]\n        self.choices = [\n            Choice.objects.create(option=self.options[0], attendee=self.attendees[0], status='Y'),\n            
Choice.objects.create(option=self.options[1], attendee=self.attendees[0], status='N'),\n            Choice.objects.create(option=self.options[2], attendee=self.attendees[0], status='Y'),\n            Choice.objects.create(option=self.options[0], attendee=self.attendees[1], status='M'),\n            Choice.objects.create(option=self.options[1], attendee=self.attendees[1], status='Y'),\n            Choice.objects.create(option=self.options[2], attendee=self.attendees[1], status='Y'), ]\n\n    def test_prerequisites(self):\n        self.assertIsNotNone(self.query)\n        self.assertEqual(len(self.options), 3)\n        self.assertEqual(len(self.query.options.all()), 3)\n        self.assertEqual(len(self.query.choices()), 6)\n\n        self.assertEqual(len(self.attendees), 2)\n        self.assertEqual(len(self.attendees[0].choices.all()), 3)\n        self.assertEqual(len(self.attendees[1].choices.all()), 3)\n\n        self.assertEqual(len(self.choices), 6)\n        self.assertEqual(len(self.options[0].choices.all()), 2)\n        self.assertEqual(len(self.options[1].choices.all()), 2)\n        self.assertEqual(len(self.options[2].choices.all()), 2)\n\n    def test_unique_choice(self):\n        try:\n            Choice.objects.create(option=self.options[0], attendee=self.attendees[0], status='M')\n            self.fail()\n        except utils.IntegrityError:\n            pass\n\n    def test_delete_attendee_deletes_choices(self):\n        self.assertEqual(len(self.query.choices()), 6)\n\n        self.attendees[0].delete()\n        self.assertEqual(len(self.query.choices()), 3)\n\n        self.attendees[1].delete()\n        self.assertEqual(len(self.query.choices()), 0)\n\n    def test_delete_option_deletes_choices(self):\n        self.assertEqual(len(self.query.choices()), 6)\n\n        self.options[0].delete()\n        self.assertEqual(len(self.query.choices()), 4)\n\n        self.options[1].delete()\n        self.assertEqual(len(self.query.choices()), 2)\n\n        self.options[2].delete()\n        self.assertEqual(len(self.query.choices()), 0)\n\n    def test_delete_query_deletes_options_and_choices(self):\n        self.assertEqual(len(self.query.options.all()), 3)\n        self.assertEqual(len(self.query.choices()), 6)\n\n        self.query.delete()\n\n        self.assertEqual(len(Option.objects.all()), 0)\n        self.assertEqual(len(Choice.objects.all()), 0)\n\n\nclass OptionModelTest(TestCase):\n    def setUp(self):\n        self.query = Query.objects.create(name='When can we meet?')\n        self.option = Option.objects.create(begin_date='2021-01-01', query=self.query)\n\n    def test_option_string(self):\n        self.assertEqual(str(self.option), '2021-01-01 (When can we meet?)')\n        self.option.begin_time = time(18, 00)\n        self.assertEqual(str(self.option), '2021-01-01 18:00 (When can we meet?)')\n        self.option.end_time = time(19, 00)\n        self.assertEqual(str(self.option), '2021-01-01 18:00 - 19:00 (When can we meet?)')\n        self.option.end_date = date(2021, 1, 2)\n        self.option.end_time = time(3, 00)\n        self.assertEqual(str(self.option), '2021-01-01 18:00 - 2021-01-02 03:00 (When can we meet?)')\n\n\nclass QueryApiAnonTest(APITestCase):\n    # TODO: add some fail tests, e.g. 
invalid ids\n\n @classmethod\n def setUpTestData(cls):\n cls.query = Query.objects.create(name='When can we meet?')\n cls.options = [\n Option.objects.create(begin_date='2021-01-01', begin_time='18:00:00', end_date='2021-01-02', end_time='03:00:00', query=cls.query),\n Option.objects.create(begin_date='2021-01-02', begin_time='18:00:00', end_date='2021-01-03', end_time='03:00:00', query=cls.query),\n Option.objects.create(begin_date='2021-01-03', begin_time='18:00:00', end_date='2021-01-04', end_time='03:00:00', query=cls.query), ]\n cls.attendees = [\n Attendee.objects.create(name='Alisa'),\n Attendee.objects.create(name='Asisa'),\n Attendee.objects.create(name='Takatuka'), ]\n cls.choices = [\n Choice.objects.create(option=cls.options[0], attendee=cls.attendees[0], status='Y'),\n Choice.objects.create(option=cls.options[1], attendee=cls.attendees[0], status='N'),\n Choice.objects.create(option=cls.options[2], attendee=cls.attendees[0], status='Y'),\n Choice.objects.create(option=cls.options[0], attendee=cls.attendees[1], status='M'),\n Choice.objects.create(option=cls.options[1], attendee=cls.attendees[1], status='Y'),\n Choice.objects.create(option=cls.options[2], attendee=cls.attendees[1], status='Y'), ]\n\n # root --------------------------------------------------------------------\n def test_get_root(self):\n response = self.client.get('/app/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n json = response.json()\n self.assertIsNotNone(json)\n self.assertIsNotNone(json['queries'])\n self.assertIsNotNone(json['options'])\n self.assertIsNotNone(json['choices'])\n self.assertIsNotNone(json['attendees'])\n self.assertTrue(match(r'^https?://[a-zA-Z-.]+/app/queries/\\?format=json$', json['queries']))\n self.assertTrue(match(r'^https?://[a-zA-Z-.]+/app/options/\\?format=json$', json['options']))\n self.assertTrue(match(r'^https?://[a-zA-Z-.]+/app/choices/\\?format=json$', json['choices']))\n self.assertTrue(match(r'^https?://[a-zA-Z-.]+/app/attendees/\\?format=json$', json['attendees']))\n\n def test_post_root(self):\n response = self.client.post('/app/', {}, format='json')\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_put_root(self):\n response = self.client.put('/app/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_patch_root(self):\n response = self.client.patch('/app/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_delete_root(self):\n response = self.client.delete('/app/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_options_root(self):\n response = self.client.options('/app/', {'format': 'json'})\n self.assertEqual(response.status_code, 200)\n # TODO: implement me\n\n # query list --------------------------------------------------------------\n def test_get_query_list(self):\n response = self.client.get('/app/queries/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_post_query_list(self):\n response = self.client.post('/app/queries/', {'name': 'New Query'}, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n json = response.json()\n self.assertIsNotNone(json)\n\n self.assertGreaterEqual(int(json['id']), 1)\n self.assertEqual(json['name'], 'New Query')\n self.assertEqual(json['options'], [])\n\n def 
test_put_query_list(self):\n response = self.client.put('/app/queries/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_patch_query_list(self):\n response = self.client.patch('/app/queries/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_delete_query_list(self):\n response = self.client.delete('/app/queries/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_options_query_list(self):\n response = self.client.options('/app/queries/', {'format': 'json'})\n self.assertEqual(response.status_code, 200)\n # TODO: implement me\n\n # query item --------------------------------------------------------------\n def test_get_query_item(self):\n response = self.client.get('/app/queries/' + str(self.query.id) + '/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n json = response.json()\n self.assertIsNotNone(json)\n\n self.assertEqual(json['id'], self.query.id)\n self.assertEqual(json['name'], 'When can we meet?')\n\n self.assertEqual(len(json['options']), 3)\n\n self.assertEqual(json['options'][0]['id'], self.options[0].id)\n self.assertEqual(json['options'][1]['id'], self.options[1].id)\n self.assertEqual(json['options'][2]['id'], self.options[2].id)\n\n self.assertEqual(json['options'][0]['begin_date'], self.options[0].begin_date)\n self.assertEqual(json['options'][1]['begin_date'], self.options[1].begin_date)\n self.assertEqual(json['options'][2]['begin_date'], self.options[2].begin_date)\n\n self.assertEqual(json['options'][0]['begin_time'], self.options[0].begin_time)\n self.assertEqual(json['options'][1]['begin_time'], self.options[1].begin_time)\n self.assertEqual(json['options'][2]['begin_time'], self.options[2].begin_time)\n\n self.assertEqual(json['options'][0]['end_date'], self.options[0].end_date)\n self.assertEqual(json['options'][1]['end_date'], self.options[1].end_date)\n self.assertEqual(json['options'][2]['end_date'], self.options[2].end_date)\n\n self.assertEqual(json['options'][0]['end_time'], self.options[0].end_time)\n self.assertEqual(json['options'][1]['end_time'], self.options[1].end_time)\n self.assertEqual(json['options'][2]['end_time'], self.options[2].end_time)\n\n self.assertEqual(len(json['options'][0]['choices']), 2)\n self.assertEqual(len(json['options'][1]['choices']), 2)\n self.assertEqual(len(json['options'][2]['choices']), 2)\n\n self.assertEqual(json['options'][0]['choices'][0]['id'], self.choices[0].id)\n self.assertEqual(json['options'][0]['choices'][0]['attendee'], self.choices[0].attendee.name)\n self.assertEqual(json['options'][0]['choices'][0]['attendee_id'], self.choices[0].attendee.id)\n self.assertEqual(json['options'][0]['choices'][0]['status'], self.choices[0].status)\n self.assertEqual(json['options'][1]['choices'][0]['id'], self.choices[1].id)\n self.assertEqual(json['options'][1]['choices'][0]['attendee'], self.choices[1].attendee.name)\n self.assertEqual(json['options'][1]['choices'][0]['attendee_id'], self.choices[1].attendee.id)\n self.assertEqual(json['options'][1]['choices'][0]['status'], self.choices[1].status)\n self.assertEqual(json['options'][2]['choices'][0]['id'], self.choices[2].id)\n self.assertEqual(json['options'][2]['choices'][0]['attendee'], self.choices[2].attendee.name)\n self.assertEqual(json['options'][2]['choices'][0]['attendee_id'], self.choices[2].attendee.id)\n 
self.assertEqual(json['options'][2]['choices'][0]['status'], self.choices[2].status)\n self.assertEqual(json['options'][0]['choices'][1]['id'], self.choices[3].id)\n self.assertEqual(json['options'][0]['choices'][1]['attendee'], self.choices[3].attendee.name)\n self.assertEqual(json['options'][0]['choices'][1]['attendee_id'], self.choices[3].attendee.id)\n self.assertEqual(json['options'][0]['choices'][1]['status'], self.choices[3].status)\n self.assertEqual(json['options'][1]['choices'][1]['id'], self.choices[4].id)\n self.assertEqual(json['options'][1]['choices'][1]['attendee'], self.choices[4].attendee.name)\n self.assertEqual(json['options'][1]['choices'][1]['attendee_id'], self.choices[4].attendee.id)\n self.assertEqual(json['options'][1]['choices'][1]['status'], self.choices[4].status)\n self.assertEqual(json['options'][2]['choices'][1]['id'], self.choices[5].id)\n self.assertEqual(json['options'][2]['choices'][1]['attendee'], self.choices[5].attendee.name)\n self.assertEqual(json['options'][2]['choices'][1]['attendee_id'], self.choices[5].attendee.id)\n self.assertEqual(json['options'][2]['choices'][1]['status'], self.choices[5].status)\n\n def test_post_query_item(self):\n response = self.client.post('/app/queries/' + str(self.query.id) + '/', {}, format='json')\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_put_query_item(self):\n response = self.client.put('/app/queries/' + str(self.query.id) + '/', {'name': 'New Query'}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n json = response.json()\n self.assertIsNotNone(json)\n\n self.assertEqual(json['id'], self.query.id)\n self.assertEqual(json['name'], 'New Query')\n\n self.assertEqual(len(json['options']), 3)\n\n self.assertEqual(json['options'][0]['id'], self.options[0].id)\n self.assertEqual(json['options'][1]['id'], self.options[1].id)\n self.assertEqual(json['options'][2]['id'], self.options[2].id)\n\n self.assertEqual(json['options'][0]['begin_date'], self.options[0].begin_date)\n self.assertEqual(json['options'][1]['begin_date'], self.options[1].begin_date)\n self.assertEqual(json['options'][2]['begin_date'], self.options[2].begin_date)\n\n self.assertEqual(json['options'][0]['begin_time'], self.options[0].begin_time)\n self.assertEqual(json['options'][1]['begin_time'], self.options[1].begin_time)\n self.assertEqual(json['options'][2]['begin_time'], self.options[2].begin_time)\n\n self.assertEqual(json['options'][0]['end_date'], self.options[0].end_date)\n self.assertEqual(json['options'][1]['end_date'], self.options[1].end_date)\n self.assertEqual(json['options'][2]['end_date'], self.options[2].end_date)\n\n self.assertEqual(json['options'][0]['end_time'], self.options[0].end_time)\n self.assertEqual(json['options'][1]['end_time'], self.options[1].end_time)\n self.assertEqual(json['options'][2]['end_time'], self.options[2].end_time)\n\n self.assertEqual(len(json['options'][0]['choices']), 2)\n self.assertEqual(len(json['options'][1]['choices']), 2)\n self.assertEqual(len(json['options'][2]['choices']), 2)\n\n self.assertEqual(json['options'][0]['choices'][0]['id'], self.choices[0].id)\n self.assertEqual(json['options'][0]['choices'][0]['attendee'], self.choices[0].attendee.name)\n self.assertEqual(json['options'][0]['choices'][0]['attendee_id'], self.choices[0].attendee.id)\n self.assertEqual(json['options'][0]['choices'][0]['status'], self.choices[0].status)\n self.assertEqual(json['options'][1]['choices'][0]['id'], self.choices[1].id)\n 
self.assertEqual(json['options'][1]['choices'][0]['attendee'], self.choices[1].attendee.name)\n self.assertEqual(json['options'][1]['choices'][0]['attendee_id'], self.choices[1].attendee.id)\n self.assertEqual(json['options'][1]['choices'][0]['status'], self.choices[1].status)\n self.assertEqual(json['options'][2]['choices'][0]['id'], self.choices[2].id)\n self.assertEqual(json['options'][2]['choices'][0]['attendee'], self.choices[2].attendee.name)\n self.assertEqual(json['options'][2]['choices'][0]['attendee_id'], self.choices[2].attendee.id)\n self.assertEqual(json['options'][2]['choices'][0]['status'], self.choices[2].status)\n self.assertEqual(json['options'][0]['choices'][1]['id'], self.choices[3].id)\n self.assertEqual(json['options'][0]['choices'][1]['attendee'], self.choices[3].attendee.name)\n self.assertEqual(json['options'][0]['choices'][1]['attendee_id'], self.choices[3].attendee.id)\n self.assertEqual(json['options'][0]['choices'][1]['status'], self.choices[3].status)\n self.assertEqual(json['options'][1]['choices'][1]['id'], self.choices[4].id)\n self.assertEqual(json['options'][1]['choices'][1]['attendee'], self.choices[4].attendee.name)\n self.assertEqual(json['options'][1]['choices'][1]['attendee_id'], self.choices[4].attendee.id)\n self.assertEqual(json['options'][1]['choices'][1]['status'], self.choices[4].status)\n self.assertEqual(json['options'][2]['choices'][1]['id'], self.choices[5].id)\n self.assertEqual(json['options'][2]['choices'][1]['attendee'], self.choices[5].attendee.name)\n self.assertEqual(json['options'][2]['choices'][1]['attendee_id'], self.choices[5].attendee.id)\n self.assertEqual(json['options'][2]['choices'][1]['status'], self.choices[5].status)\n\n def test_patch_query_item(self):\n response = self.client.patch('/app/queries/' + str(self.query.id) + '/', {'name': 'Updated Query'}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n json = response.json()\n self.assertIsNotNone(json)\n\n self.assertEqual(json['id'], self.query.id)\n self.assertEqual(json['name'], 'Updated Query')\n\n self.assertEqual(len(json['options']), 3)\n\n self.assertEqual(json['options'][0]['id'], self.options[0].id)\n self.assertEqual(json['options'][1]['id'], self.options[1].id)\n self.assertEqual(json['options'][2]['id'], self.options[2].id)\n\n self.assertEqual(json['options'][0]['begin_date'], self.options[0].begin_date)\n self.assertEqual(json['options'][1]['begin_date'], self.options[1].begin_date)\n self.assertEqual(json['options'][2]['begin_date'], self.options[2].begin_date)\n\n self.assertEqual(json['options'][0]['begin_time'], self.options[0].begin_time)\n self.assertEqual(json['options'][1]['begin_time'], self.options[1].begin_time)\n self.assertEqual(json['options'][2]['begin_time'], self.options[2].begin_time)\n\n self.assertEqual(json['options'][0]['end_date'], self.options[0].end_date)\n self.assertEqual(json['options'][1]['end_date'], self.options[1].end_date)\n self.assertEqual(json['options'][2]['end_date'], self.options[2].end_date)\n\n self.assertEqual(json['options'][0]['end_time'], self.options[0].end_time)\n self.assertEqual(json['options'][1]['end_time'], self.options[1].end_time)\n self.assertEqual(json['options'][2]['end_time'], self.options[2].end_time)\n\n self.assertEqual(len(json['options'][0]['choices']), 2)\n self.assertEqual(len(json['options'][1]['choices']), 2)\n self.assertEqual(len(json['options'][2]['choices']), 2)\n\n self.assertEqual(json['options'][0]['choices'][0]['id'], self.choices[0].id)\n 
self.assertEqual(json['options'][0]['choices'][0]['attendee'], self.choices[0].attendee.name)\n self.assertEqual(json['options'][0]['choices'][0]['attendee_id'], self.choices[0].attendee.id)\n self.assertEqual(json['options'][0]['choices'][0]['status'], self.choices[0].status)\n self.assertEqual(json['options'][1]['choices'][0]['id'], self.choices[1].id)\n self.assertEqual(json['options'][1]['choices'][0]['attendee'], self.choices[1].attendee.name)\n self.assertEqual(json['options'][1]['choices'][0]['attendee_id'], self.choices[1].attendee.id)\n self.assertEqual(json['options'][1]['choices'][0]['status'], self.choices[1].status)\n self.assertEqual(json['options'][2]['choices'][0]['id'], self.choices[2].id)\n self.assertEqual(json['options'][2]['choices'][0]['attendee'], self.choices[2].attendee.name)\n self.assertEqual(json['options'][2]['choices'][0]['attendee_id'], self.choices[2].attendee.id)\n self.assertEqual(json['options'][2]['choices'][0]['status'], self.choices[2].status)\n self.assertEqual(json['options'][0]['choices'][1]['id'], self.choices[3].id)\n self.assertEqual(json['options'][0]['choices'][1]['attendee'], self.choices[3].attendee.name)\n self.assertEqual(json['options'][0]['choices'][1]['attendee_id'], self.choices[3].attendee.id)\n self.assertEqual(json['options'][0]['choices'][1]['status'], self.choices[3].status)\n self.assertEqual(json['options'][1]['choices'][1]['id'], self.choices[4].id)\n self.assertEqual(json['options'][1]['choices'][1]['attendee'], self.choices[4].attendee.name)\n self.assertEqual(json['options'][1]['choices'][1]['attendee_id'], self.choices[4].attendee.id)\n self.assertEqual(json['options'][1]['choices'][1]['status'], self.choices[4].status)\n self.assertEqual(json['options'][2]['choices'][1]['id'], self.choices[5].id)\n self.assertEqual(json['options'][2]['choices'][1]['attendee'], self.choices[5].attendee.name)\n self.assertEqual(json['options'][2]['choices'][1]['attendee_id'], self.choices[5].attendee.id)\n self.assertEqual(json['options'][2]['choices'][1]['status'], self.choices[5].status)\n\n def test_delete_query_item(self):\n response = self.client.delete('/app/queries/' + str(self.query.id) + '/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n response = self.client.get('/app/queries/' + str(self.query.id) + '/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n def test_options_query_item(self):\n response = self.client.options('/app/queries/' + str(self.query.id) + '/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # TODO: implement me\n\n # option list -------------------------------------------------------------\n def test_get_option_list(self):\n response = self.client.get('/app/options/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_post_option_list(self):\n response = self.client.post('/app/options/', {\n 'query_id': self.query.id,\n 'begin_date': '2021-01-01', 'begin_time': '18:00:00',\n 'end_date': '2021-01-02', 'end_time': '03:00:00'}, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n json = response.json()\n self.assertIsNotNone(json)\n\n self.assertGreaterEqual(int(json['id']), 1)\n self.assertEqual(json['query_id'], self.query.id)\n self.assertEqual(json['begin_date'], '2021-01-01')\n self.assertEqual(json['begin_time'], '18:00:00')\n self.assertEqual(json['end_date'], '2021-01-02')\n 
self.assertEqual(json['end_time'], '03:00:00')\n\n def test_put_option_list(self):\n response = self.client.put('/app/options/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_patch_option_list(self):\n response = self.client.patch('/app/options/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_delete_option_list(self):\n response = self.client.delete('/app/options/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_options_option_list(self):\n response = self.client.options('/app/options/', {'format': 'json'})\n self.assertEqual(response.status_code, 200)\n # TODO: implement me\n\n # option item -------------------------------------------------------------\n def test_get_option_item(self):\n response = self.client.get('/app/options/' + str(self.options[0].id) + '/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n json = response.json()\n self.assertIsNotNone(json)\n\n self.assertEqual(json['id'], self.options[0].id)\n self.assertEqual(json['query_id'], self.options[0].query.id)\n self.assertEqual(json['begin_date'], self.options[0].begin_date)\n self.assertEqual(json['begin_time'], self.options[0].begin_time)\n self.assertEqual(json['end_date'], self.options[0].end_date)\n self.assertEqual(json['end_time'], self.options[0].end_time)\n\n def test_post_option_item(self):\n response = self.client.post('/app/options/' + str(self.options[0].id) + '/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n def test_put_option_item(self):\n response = self.client.put('/app/options/' + str(self.options[0].id) + '/', {\n 'query_id': self.query.id,\n 'begin_date': '2021-01-11', 'begin_time': '20:30:00',\n 'end_date': '2021-01-11', 'end_time': '21:00:00'}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n json = response.json()\n self.assertIsNotNone(json)\n\n self.assertEqual(int(json['id']), self.options[0].id)\n self.assertEqual(json['query_id'], self.query.id)\n self.assertEqual(json['begin_date'], '2021-01-11')\n self.assertEqual(json['begin_time'], '20:30:00')\n self.assertEqual(json['end_date'], '2021-01-11')\n self.assertEqual(json['end_time'], '21:00:00')\n\n def test_patch_option_item(self):\n response = self.client.patch('/app/options/' + str(self.options[0].id) + '/', {\n 'begin_time': '18:30:00'}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n json = response.json()\n self.assertIsNotNone(json)\n\n self.assertEqual(int(json['id']), self.options[0].id)\n self.assertEqual(json['query_id'], self.query.id)\n self.assertEqual(json['begin_date'], '2021-01-01')\n self.assertEqual(json['begin_time'], '18:30:00')\n self.assertEqual(json['end_date'], '2021-01-02')\n self.assertEqual(json['end_time'], '03:00:00')\n\n def test_delete_option_item(self):\n response = self.client.delete('/app/options/' + str(self.options[0].id) + '/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n response = self.client.get('/app/options/' + str(self.options[0].id) + '/', {'format': 'json'})\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n def test_options_option_item(self):\n response = self.client.options('/app/options/' + str(self.options[0].id) + '/', {'format': 'json'})\n self.assertEqual(response.status_code, 
status.HTTP_200_OK)\n        # TODO: implement me\n\n    # choice list -------------------------------------------------------------\n    def test_get_choice_list(self):\n        response = self.client.get('/app/choices/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n    def test_post_choice_list(self):\n        response = self.client.post('/app/choices/', {\n            'option_id': self.options[0].id,\n            'attendee_id': self.attendees[2].id,\n            'status': 'Y'}, format='json')\n        self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n        json = response.json()\n        self.assertIsNotNone(json)\n\n        self.assertGreaterEqual(int(json['id']), 1)\n        self.assertEqual(int(json['option_id']), self.options[0].id)\n        self.assertEqual(int(json['attendee_id']), self.attendees[2].id)\n        self.assertEqual(json['attendee'], self.attendees[2].name)\n        self.assertEqual(json['status'], 'Y')\n\n    def test_put_choice_list(self):\n        response = self.client.put('/app/choices/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n    def test_patch_choice_list(self):\n        response = self.client.patch('/app/choices/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n    def test_delete_choice_list(self):\n        response = self.client.delete('/app/choices/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n    def test_options_choice_list(self):\n        response = self.client.options('/app/choices/', {'format': 'json'})\n        self.assertEqual(response.status_code, 200)\n        # TODO: implement me\n\n    # choice item -------------------------------------------------------------\n    def test_get_choice_item(self):\n        response = self.client.get('/app/choices/' + str(self.choices[0].id) + '/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n        json = response.json()\n        self.assertIsNotNone(json)\n\n        self.assertEqual(int(json['id']), self.choices[0].id)\n        self.assertEqual(int(json['option_id']), self.choices[0].option_id)\n        self.assertEqual(int(json['attendee_id']), self.choices[0].attendee_id)\n        self.assertEqual(json['attendee'], self.choices[0].attendee.name)\n        self.assertEqual(json['status'], self.choices[0].status)\n\n    def test_post_choice_item(self):\n        response = self.client.post('/app/choices/' + str(self.choices[0].id) + '/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n    def test_put_choice_item(self):\n        response = self.client.put('/app/choices/' + str(self.choices[0].id) + '/', {\n            'option_id': self.options[1].id,\n            'attendee_id': self.attendees[2].id,\n            'status': 'N'}, format='json')\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n        json = response.json()\n        self.assertIsNotNone(json)\n\n        self.assertEqual(int(json['id']), self.choices[0].id)\n        self.assertEqual(int(json['option_id']), self.options[1].id)\n        self.assertEqual(int(json['attendee_id']), self.attendees[2].id)\n        self.assertEqual(json['attendee'], self.attendees[2].name)\n        self.assertEqual(json['status'], 'N')\n\n    def test_patch_choice_item(self):\n        response = self.client.patch('/app/choices/' + str(self.choices[0].id) + '/', {\n            'status': 'N'}, format='json')\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n        json = response.json()\n        self.assertIsNotNone(json)\n\n        self.assertEqual(int(json['id']), self.choices[0].id)\n        self.assertEqual(int(json['option_id']), self.options[0].id)\n        
self.assertEqual(int(json['attendee_id']), self.attendees[0].id)\n        self.assertEqual(json['attendee'], self.attendees[0].name)\n        self.assertEqual(json['status'], 'N')\n\n    def test_delete_choice_item(self):\n        response = self.client.delete('/app/choices/' + str(self.choices[0].id) + '/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n        response = self.client.get('/app/choices/' + str(self.choices[0].id) + '/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n    def test_options_choice_item(self):\n        response = self.client.options('/app/choices/' + str(self.choices[0].id) + '/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n        # TODO: implement me\n\n    # attendee list -------------------------------------------------------------\n    def test_get_attendee_list(self):\n        response = self.client.get('/app/attendees/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n    def test_post_attendee_list(self):\n        response = self.client.post('/app/attendees/', {\n            'name': 'new attendee'}, format='json')\n        self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n        json = response.json()\n        self.assertIsNotNone(json)\n\n        self.assertGreaterEqual(int(json['id']), 1)\n        self.assertEqual(json['name'], 'new attendee')\n\n    def test_put_attendee_list(self):\n        response = self.client.put('/app/attendees/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n    def test_patch_attendee_list(self):\n        response = self.client.patch('/app/attendees/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n    def test_delete_attendee_list(self):\n        response = self.client.delete('/app/attendees/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n    def test_options_attendee_list(self):\n        response = self.client.options('/app/attendees/', {'format': 'json'})\n        self.assertEqual(response.status_code, 200)\n        # TODO: implement me\n\n    # attendee item -------------------------------------------------------------\n    def test_get_attendee_item(self):\n        response = self.client.get('/app/attendees/' + str(self.attendees[0].id) + '/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n        json = response.json()\n        self.assertIsNotNone(json)\n\n        self.assertEqual(json['id'], self.attendees[0].id)\n        self.assertEqual(json['name'], self.attendees[0].name)\n\n    def test_post_attendee_item(self):\n        response = self.client.post('/app/attendees/' + str(self.attendees[0].id) + '/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n    def test_put_attendee_item(self):\n        response = self.client.put('/app/attendees/' + str(self.attendees[0].id) + '/', {\n            'name': 'new attendee'}, format='json')\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n        json = response.json()\n        self.assertIsNotNone(json)\n\n        self.assertEqual(int(json['id']), self.attendees[0].id)\n        self.assertEqual(json['name'], 'new attendee')\n\n    def test_patch_attendee_item(self):\n        response = self.client.patch('/app/attendees/' + str(self.attendees[0].id) + '/', {\n            'name': 'new attendee'}, format='json')\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n        json = response.json()\n        self.assertIsNotNone(json)\n\n        self.assertEqual(int(json['id']), self.attendees[0].id)\n        
self.assertEqual(json['name'], 'new attendee')\n\n    def test_delete_attendee_item(self):\n        response = self.client.delete('/app/attendees/' + str(self.attendees[0].id) + '/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n        response = self.client.get('/app/attendees/' + str(self.attendees[0].id) + '/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n    def test_options_attendee_item(self):\n        response = self.client.options('/app/attendees/' + str(self.attendees[0].id) + '/', {'format': 'json'})\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n        # TODO: implement me\n\n\nclass APIAuthTest(APITestCase):\n    @classmethod\n    def setUpTestData(cls):\n        User.objects.create_user(\n            'testuser', '[email protected]', 'testpassword')\n\n    def setUp(self):\n        token_response = self.client.post('/app/auth-token/', {\n            'username': 'testuser', 'password': 'testpassword', })\n        self.assertEqual(token_response.status_code, 200)\n        json = token_response.json()\n        token = json['token']\n\n        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)\n\n    def test_something(self):\n        pass\n" }, { "alpha_fraction": 0.6314631700515747, "alphanum_fraction": 0.6314631700515747, "avg_line_length": 35.36000061035156, "blob_id": "e67916998e5e8ecccdc492cb323f2c840f9d9252", "content_id": "ae44c2daa202277b15a40bf98a88678ea90a2509", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1818, "license_type": "no_license", "max_line_length": 96, "num_lines": 50, "path": "/app/views.py", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "from rest_framework import viewsets, mixins # , permissions\n\nfrom .serializers import QuerySerializer, OptionSerializer, ChoiceSerializer, AttendeeSerializer\nfrom .models import Query, Option, Choice, Attendee\n\n\nclass QueryViewSet(mixins.CreateModelMixin,\n                   mixins.RetrieveModelMixin,\n                   mixins.UpdateModelMixin,\n                   mixins.DestroyModelMixin,\n                   viewsets.GenericViewSet):\n    queryset = Query.objects.all()\n    serializer_class = QuerySerializer\n\n# def get_permissions(self):\n#     if self.action == 'retrieve':\n#         permission_classes = [permissions.AllowAny]\n#     elif self.action == 'list':\n#         permission_classes = [permissions.AllowAny]\n#     else:\n#         permission_classes = [permissions.IsAuthenticated]\n#\n#     return [permission() for permission in permission_classes]\n\n\nclass OptionViewSet(mixins.CreateModelMixin,\n                    mixins.RetrieveModelMixin,\n                    mixins.UpdateModelMixin,\n                    mixins.DestroyModelMixin,\n                    viewsets.GenericViewSet):\n    queryset = Option.objects.all()\n    serializer_class = OptionSerializer\n\n\nclass ChoiceViewSet(mixins.CreateModelMixin,\n                    mixins.RetrieveModelMixin,\n                    mixins.UpdateModelMixin,\n                    mixins.DestroyModelMixin,\n                    viewsets.GenericViewSet):\n    queryset = Choice.objects.all()\n    serializer_class = ChoiceSerializer\n\n\nclass AttendeeViewSet(mixins.CreateModelMixin,\n                      mixins.RetrieveModelMixin,\n                      mixins.UpdateModelMixin,\n                      mixins.DestroyModelMixin,\n                      viewsets.GenericViewSet):\n    queryset = Attendee.objects.all()\n    serializer_class = AttendeeSerializer\n" }, { "alpha_fraction": 0.6903765797615051, "alphanum_fraction": 0.6929134130477905, "avg_line_length": 21.04347801208496, "blob_id": "5850fa61b5ee6fd13f635d475eee7ad1448f126f", "content_id": "d0a0e1d1a8d579540289c6e3e0b4c3c5028e4079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 508, "license_type": "no_license", "max_line_length": 
53, "num_lines": 23, "path": "/django-entrypoint.sh", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "#!/bin/sh\n# thanks to https://blog.bitsacm.in/django-on-docker/\n\nif [ \"$DATABASE\" = \"postgres\" ]; then\n echo \"Waiting for postgres...\"\n\n while ! nc -z $DATABASE_HOST $DATABASE_PORT; do\n sleep 0.5\n done\n\n echo \"PostgreSQL started\"\nfi\n\necho \"Making migrations and migrating the database.\"\npython manage.py makemigrations app --noinput\npython manage.py migrate --noinput\n\necho \"Collecting static files.\"\npython manage.py collectstatic --noinput\n\necho \"Done.\"\necho \"Passing on to $@\"\nexec \"$@\"\n\n" }, { "alpha_fraction": 0.6878698468208313, "alphanum_fraction": 0.6893491148948669, "avg_line_length": 22.310344696044922, "blob_id": "0a116d111d34005a144b089317fa12ffff4f397d", "content_id": "778fd50e43b3fbb37282a9f5cf7370fcbb330c2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 676, "license_type": "no_license", "max_line_length": 66, "num_lines": 29, "path": "/app/admin.py", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom .models import Query, Option, Attendee, Choice\n\n\nclass OptionInline(admin.StackedInline):\n model = Option\n extra = 1\n\n\[email protected](Query)\nclass QueryAdmin(admin.ModelAdmin):\n search_fields = ['title']\n inlines = [OptionInline]\n\n\[email protected](Attendee)\nclass AttendeeAdmin(admin.ModelAdmin):\n search_fields = ['name']\n\n\[email protected](Choice)\nclass ChoiceAdmin(admin.ModelAdmin):\n list_display = ('attendee', 'query', 'option', 'status')\n list_display_links = ('attendee', 'query', 'option', 'status')\n list_filter = ('attendee', 'option__query')\n\n def query(self, obj):\n return obj.option.query\n" }, { "alpha_fraction": 0.7765151262283325, "alphanum_fraction": 0.7765151262283325, "avg_line_length": 34.20000076293945, "blob_id": "29b6546c52e1901af022dbb54a9c96ced5bbca19", "content_id": "af9e734fc1f7418b2828625cad4ce9012d85fddd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 528, "license_type": "no_license", "max_line_length": 61, "num_lines": 15, "path": "/app/urls.py", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "from django.urls import include, path\nfrom rest_framework import routers\nfrom rest_framework.authtoken import views as authtoken_views\nfrom . 
import views as app_views\n\nrouter = routers.DefaultRouter()\nrouter.register('queries', app_views.QueryViewSet)\nrouter.register('options', app_views.OptionViewSet)\nrouter.register('choices', app_views.ChoiceViewSet)\nrouter.register('attendees', app_views.AttendeeViewSet)\n\nurlpatterns = [\n    path('', include(router.urls)),\n    path('auth-token/', authtoken_views.obtain_auth_token),\n]\n" }, { "alpha_fraction": 0.576144814491272, "alphanum_fraction": 0.6091586947441101, "avg_line_length": 31.379310607910156, "blob_id": "1fd59a88a77e92d7dd2333eaeb0e9aea0c65d848", "content_id": "c35e1f4fd85e8849d8d08616a4ecb4d15052f7e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 939, "license_type": "no_license", "max_line_length": 124, "num_lines": 29, "path": "/app/migrations/0003_auto_20201230_2156.py", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.4 on 2020-12-30 21:56\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('app', '0002_auto_20201230_1944'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='choice',\n            name='attendee',\n            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='choices', to='app.attendee'),\n        ),\n        migrations.AlterField(\n            model_name='choice',\n            name='option',\n            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='choices', to='app.option'),\n        ),\n        migrations.AlterField(\n            model_name='option',\n            name='query',\n            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='options', to='app.query'),\n        ),\n    ]\n" }, { "alpha_fraction": 0.6790322661399841, "alphanum_fraction": 0.6790322661399841, "avg_line_length": 29.950000762939453, "blob_id": "d691e174a3aafb8f5946b902bac36b7f61435f15", "content_id": "447ff955a3f229b8ec17bc5c5ee41de4c1403d90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 620, "license_type": "no_license", "max_line_length": 90, "num_lines": 20, "path": "/gitlab-runner-setup.sh", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\ndocker volume create gitlab-runner-config\n\ndocker run -d --name gitlab-runner --restart always \\\n    -v /var/run/docker.sock:/var/run/docker.sock \\\n    -v gitlab-runner-config:/etc/gitlab-runner \\\n    gitlab/gitlab-runner:latest\n\ndocker run --rm -v gitlab-runner-config:/etc/gitlab-runner gitlab/gitlab-runner register \\\n    --non-interactive \\\n    --executor \"docker\" \\\n    --docker-image alpine:latest \\\n    --url \"https://gitlab.com/\" \\\n    --registration-token \"TOKEN-HERE\" \\\n    --description \"docker-runner\" \\\n    --tag-list \"docker\" \\\n    --run-untagged=\"true\" \\\n    --locked=\"false\" \\\n    --access-level=\"not_protected\"\n\n" }, { "alpha_fraction": 0.6832579374313354, "alphanum_fraction": 0.6877828240394592, "avg_line_length": 19.045454025268555, "blob_id": "2caa7f12d3a61f3de6f5c9aec24c7138e2840fe1", "content_id": "567349a6107b1b3cf5abbfa645cddedc44905d4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 442, "license_type": "no_license", "max_line_length": 84, "num_lines": 22, "path": "/nginx/nginx-start-watch-certs.sh", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nset -e\n# set -x\n\nSLEEP_BEFORE_RELOAD=60\n\nwatch_certs_and_reload() {\n    
while true; do\n        # TODO watch explicitly on the creation of new certificates\n        inotifywait /etc/letsencrypt/live/;\n        echo \"cert change detected, waiting $SLEEP_BEFORE_RELOAD seconds before reload\";\n        sleep $SLEEP_BEFORE_RELOAD\n        echo \"reloading nginx\";\n        nginx -s reload;\n        echo \"done\";\n    done\n}\n\nwatch_certs_and_reload &\n\nnginx -g \"daemon off;\"\n\n" }, { "alpha_fraction": 0.6807780265808105, "alphanum_fraction": 0.6945080161094666, "avg_line_length": 22, "blob_id": "6cc9ac01790e203c6619710e1ca5200355159df8", "content_id": "cf71328c03e14d803ceb7b7311b378eedc5ee871", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 874, "license_type": "no_license", "max_line_length": 63, "num_lines": 38, "path": "/nginx/nginx-init-ssl.sh", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nset -e\n# set -x\n\nDOMAIN_NAME=api.poodle.rtti.de\n\nPATH_LETSENCRYPT=/etc/letsencrypt\n\nFILE_DHPARAMS=$PATH_LETSENCRYPT/dhparams.pem\nFILE_CHAIN=$PATH_LETSENCRYPT/live/$DOMAIN_NAME/fullchain.pem\nFILE_KEY=$PATH_LETSENCRYPT/live/$DOMAIN_NAME/privkey.pem\n\nSIZE_DHPARAMS=4096\nSIZE_KEY=4096\n\nmkdir -p $PATH_LETSENCRYPT/live/$DOMAIN_NAME/\n\nif [ ! -e $FILE_DHPARAMS ]; then\n    echo \"Generating $FILE_DHPARAMS...\"\n    openssl dhparam -out $FILE_DHPARAMS $SIZE_DHPARAMS\n    echo \"Done.\"\nelse\n    echo \"Using existing $FILE_DHPARAMS.\"\nfi\n\nif [ ! -e $FILE_KEY ] || [ ! -e $FILE_CHAIN ]; then\n    echo \"Generating $FILE_CHAIN and $FILE_KEY...\"\n    openssl req -x509 -nodes -newkey rsa:$SIZE_KEY -days 1 \\\n        -keyout $FILE_KEY -out $FILE_CHAIN -subj \"/CN=$DOMAIN_NAME\"\n    echo \"Done.\"\nelse\n    echo \"Using existing $FILE_CHAIN and $FILE_KEY.\"\nfi\n\necho \"Done.\"\necho \"Passing on to $@\"\nexec \"$@\"\n" }, { "alpha_fraction": 0.7053571343421936, "alphanum_fraction": 0.7276785969734192, "avg_line_length": 19.363636016845703, "blob_id": "712cdc81aee3f01eaf003778545c41c3bde8532f", "content_id": "354557e9b24c9096e7204ca80812d1775ea0c1dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 896, "license_type": "no_license", "max_line_length": 157, "num_lines": 44, "path": "/README.md", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "[![pipeline status](https://gitlab.com/rttti/poodle-backend-django/badges/main/pipeline.svg)](https://gitlab.com/rttti/poodle-backend-django/-/commits/main) \n\n\n# poodle-backend-django\n\nPoodle is a scheduling tool. 
\n\nThis repository contains the REST API backend implemented in django.\n\n## Based on\n\n - https://github.com/rti/django-rest-api\n\n## How to use\n\nInstall docker-compose\n\n### Start the API\n\n```shell\ndocker-compose up\n```\n\nDirect your browser to http://127.0.0.1:8000/app/\n\n### Access the django admin interface\n\n```shell\ndocker-compose run --rm django python manage.py createsuperuser\ndocker-compose up\n```\n\nDirect your browser to http://127.0.0.1:8000/admin/\n\n### Run tests\n\n```shell\ndocker-compose run --rm django python manage.py test --noinput --failfast\n```\n### Deploy in production\n\n```shell\ndocker-compose -f docker-compose.yml -f docker-compose-production.yml up -d --build\n```\n" }, { "alpha_fraction": 0.46786633133888245, "alphanum_fraction": 0.5475578308105469, "avg_line_length": 18.450000762939453, "blob_id": "662dd57fffe05b414fa9d40b576b087e1bb4c516", "content_id": "9c490d85f026b9bc7986c315303b056d99d0e2d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 389, "license_type": "no_license", "max_line_length": 47, "num_lines": 20, "path": "/app/migrations/0004_auto_20201231_1433.py", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.4 on 2020-12-31 14:33\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0003_auto_20201230_2156'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='Item',\n ),\n migrations.AlterModelOptions(\n name='option',\n options={},\n ),\n ]\n" }, { "alpha_fraction": 0.5651041865348816, "alphanum_fraction": 0.578125, "avg_line_length": 14.319999694824219, "blob_id": "4806c62e686551bc11ad67cda54966fce3ce46ef", "content_id": "eaedf56c43f1f463bfd5a97573171f270f02fa19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 384, "license_type": "no_license", "max_line_length": 46, "num_lines": 25, "path": "/docker-compose.yml", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "version: '3'\n\nvolumes:\n postgres-data:\n static:\n\nservices:\n postgres:\n image: postgres:latest\n expose:\n - \"5432\"\n volumes:\n - postgres-data:/var/lib/postgresql/data\n env_file:\n - project.env\n\n django:\n build:\n context: .\n volumes:\n - static:/home/django/project/static\n env_file:\n - project.env\n depends_on:\n - postgres\n\n" }, { "alpha_fraction": 0.6904109716415405, "alphanum_fraction": 0.6904109716415405, "avg_line_length": 23.33333396911621, "blob_id": "393e5f6077eb1c202d977276c0ffc4b2df96c271", "content_id": "570d99b6f05fb9ecfdb46d275d4e8a8adadcde78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 365, "license_type": "no_license", "max_line_length": 50, "num_lines": 15, "path": "/certbot/Dockerfile", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "FROM debian:buster-slim\n\nRUN apt-get update\nRUN apt-get install -y openssl certbot netcat\\\n && apt-get remove --purge --auto-remove -y \\\n && rm -rf /var/lib/apt/lists/*\n\nCOPY certbot-init.sh /\nRUN chmod a+x /certbot-init.sh\n\nCOPY certbot-auto-renew.sh /\nRUN chmod a+x /certbot-auto-renew.sh\n\nENTRYPOINT [\"/certbot-init.sh\"]\nCMD [\"/certbot-auto-renew.sh\"]\n" }, { "alpha_fraction": 0.6262136101722717, "alphanum_fraction": 0.655339777469635, "avg_line_length": 21.844444274902344, "blob_id": "96dcc98076fbb77f400430be2a8308fcdfda58a2", 
"content_id": "8db02b0acb414f295fa01b0ab01aebb5b05d307a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1030, "license_type": "no_license", "max_line_length": 88, "num_lines": 45, "path": "/certbot/certbot-auto-renew.sh", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "\n#!/bin/sh\n\nDOMAIN=\"api.poodle.rtti.de\"\n\nCERT_VALID_TIME_MIN=$((60 * 60 * 24 * 30 * 2))\nCERT_CHECK_INTERVAL=$((60 * 60 * 24))\n\nLETSENCRYPT_DIR=\"/etc/letsencrypt\"\nCURRENT_CERTS_DIR=\"$LETSENCRYPT_DIR/live\"\nCURRENT_CERT=\"$CURRENT_CERTS_DIR/$DOMAIN/fullchain.pem\"\n\n\ncurrent_date_time() {\n date +%s\n}\n\ncurrent_cert_valid_until() {\n date -d \"`openssl x509 -enddate -noout -in $CURRENT_CERT | sed 's/notAfter=//'`\" +%s\n}\n\ncurrent_cert_time_remaining() {\n echo $((`current_cert_valid_until` - `current_date_time`))\n}\n\ncurrent_cert_check_and_renew() {\n echo \"Certificate valid for $(( `current_cert_time_remaining` / 60 / 60 / 24 )) days.\"\n echo \"Renew threshold is $(( $CERT_VALID_TIME_MIN / 60 / 60 / 24 )) days.\"\n\n if [ `current_cert_time_remaining` -lt $CERT_VALID_TIME_MIN ]; then\n echo \"Requesting new certificate.\"\n certbot renew\n else\n echo \"Certificate ok.\"\n fi\n}\n\nmain() {\n while true; do \n echo \"Checking current certificate.\"\n current_cert_check_and_renew\n sleep $CERT_CHECK_INTERVAL\n done\n}\n\nmain\n\n" }, { "alpha_fraction": 0.6311320662498474, "alphanum_fraction": 0.645283043384552, "avg_line_length": 17.928571701049805, "blob_id": "9a2d3bdd5c318b744372761da30b47a3da44c5d6", "content_id": "c8b3b6cf57b5b65e142070066bf3b15b1b617c18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1060, "license_type": "no_license", "max_line_length": 68, "num_lines": 56, "path": "/certbot/certbot-init.sh", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n# set -x\nset -e\n\nDOMAIN=\"api.poodle.rtti.de\"\nEMAIL=\"[email protected]\"\n# enable for testing\n# STAGING=\"--staging\"\n\nRSA_KEY_SIZE=4096\n# testing only\n# RSA_KEY_SIZE=1024\n\nLETSENCRYPT_DIR=\"/etc/letsencrypt\"\nCURRENT_CERTS_DIR=\"$LETSENCRYPT_DIR/live/$DOMAIN\"\nCURRENT_CERT=\"$CURRENT_CERTS_DIR/fullchain.pem\"\n\nwait_for_nginx() {\n echo \"Waiting for nginx...\"\n while ! 
nc -z nginx 80; do\n sleep 0.5\n done\n\n echo \"Nginx started\"\n}\n\nmain() {\n wait_for_nginx\n\n cert_issuer=`openssl x509 -issuer -noout -in $CURRENT_CERT`\n\n if [ \"$cert_issuer\" = \"issuer=CN = $DOMAIN\" ]; then\n echo \"Found self signed certificate.\"\n\n echo \"Moving to $LETSENCRYPT_DIR/selfsigned-bootstrap-cert.\"\n mv $CURRENT_CERTS_DIR $LETSENCRYPT_DIR/selfsigned-bootstrap-cert\n\n certbot certonly --webroot -w /var/www/certbot \\\n $STAGING \\\n --email $EMAIL \\\n -d $DOMAIN \\\n --rsa-key-size $RSA_KEY_SIZE \\\n --agree-tos \\\n --no-eff-email\n else\n echo \"Ok, certificate is not self signed.\"\n fi\n\n echo \"Done.\"\n}\n\nmain\n\necho \"Passing on to $@\"\nexec \"$@\"\n" }, { "alpha_fraction": 0.6756756901741028, "alphanum_fraction": 0.6756756901741028, "avg_line_length": 30.983051300048828, "blob_id": "4731cb8c3d58ba57851dd6f1e473f91e5f69d671", "content_id": "a1a22337cc0df9f28762319fd727b48f917a455d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1887, "license_type": "no_license", "max_line_length": 87, "num_lines": 59, "path": "/app/serializers.py", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom .models import Query, Option, Choice, Attendee\n\n\nclass EmbeddedChoiceSerializer(serializers.ModelSerializer):\n attendee = serializers.SlugRelatedField(read_only=True, slug_field='name')\n status = serializers.CharField(read_only=True)\n\n class Meta:\n model = Choice\n fields = ['id', 'attendee', 'attendee_id', 'status']\n\n\nclass EmbeddedOptionSerializer(serializers.ModelSerializer):\n choices = EmbeddedChoiceSerializer(many=True, read_only=True)\n begin_date = serializers.DateField(read_only=True)\n begin_time = serializers.TimeField(read_only=True)\n end_date = serializers.DateField(read_only=True)\n end_time = serializers.TimeField(read_only=True)\n\n class Meta:\n model = Option\n fields = ['id', 'begin_date', 'begin_time',\n 'end_date', 'end_time', 'choices']\n\n\nclass QuerySerializer(serializers.ModelSerializer):\n options = EmbeddedOptionSerializer(many=True, read_only=True)\n # choices = ChoiceSerializer(many=True, read_only=True)\n\n class Meta:\n model = Query\n fields = ('id', 'name', 'options')\n # fields = ('id', 'name', 'options', 'choices')\n\n\nclass OptionSerializer(serializers.ModelSerializer):\n query_id = serializers.IntegerField()\n\n class Meta:\n model = Option\n fields = ['id', 'begin_date', 'begin_time', 'end_date', 'end_time', 'query_id']\n\n\nclass ChoiceSerializer(serializers.ModelSerializer):\n attendee = serializers.SlugRelatedField(read_only=True, slug_field='name')\n attendee_id = serializers.IntegerField()\n option_id = serializers.IntegerField()\n\n class Meta:\n model = Choice\n fields = ['id', 'attendee', 'attendee_id', 'option_id', 'status']\n\n\nclass AttendeeSerializer(serializers.ModelSerializer):\n class Meta:\n model = Attendee\n fields = ['id', 'name']\n" }, { "alpha_fraction": 0.5219278931617737, "alphanum_fraction": 0.5327833294868469, "avg_line_length": 38.7068977355957, "blob_id": "e370a708954c40db985a20002a2cc08daf1d99af", "content_id": "d2ecc436660538131ae0a2e6c2922dac119ecc65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2303, "license_type": "no_license", "max_line_length": 114, "num_lines": 58, "path": "/app/migrations/0002_auto_20201230_1944.py", "repo_name": "rti/poodle-backend-django", "src_encoding": "UTF-8", "text": 
"# Generated by Django 3.1.4 on 2020-12-30 19:44\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Attendee',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=64)),\n ],\n ),\n migrations.CreateModel(\n name='Query',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=512)),\n ],\n options={\n 'verbose_name_plural': 'Queries',\n },\n ),\n migrations.CreateModel(\n name='Option',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('begin_date', models.DateField()),\n ('begin_time', models.TimeField(blank=True, null=True)),\n ('end_date', models.DateField(blank=True, null=True)),\n ('end_time', models.TimeField(blank=True, null=True)),\n ('query', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.query')),\n ],\n options={\n 'ordering': ['id'],\n },\n ),\n migrations.CreateModel(\n name='Choice',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('status', models.CharField(choices=[('Y', 'Yes'), ('N', 'No'), ('M', 'Maybe')], max_length=1)),\n ('attendee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.attendee')),\n ('option', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.option')),\n ],\n ),\n migrations.AddConstraint(\n model_name='choice',\n constraint=models.UniqueConstraint(fields=('attendee', 'option'), name='unique_choice'),\n ),\n ]\n" } ]
20
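For illustration only: the nested fields declared in app/serializers.py above (Query -> options -> choices) imply a response shape like the following, written as a Python dict with made-up values; this is a hedged sketch derived from the declared field lists, not output captured from the project.

example_query = {
    "id": 1,
    "name": "Team lunch",              # Query fields: 'id', 'name', 'options'
    "options": [{                      # EmbeddedOptionSerializer fields
        "id": 3,
        "begin_date": "2021-01-05",
        "begin_time": None,
        "end_date": None,
        "end_time": None,
        "choices": [                   # EmbeddedChoiceSerializer fields
            {"id": 7, "attendee": "Alice", "attendee_id": 2, "status": "Y"},
        ],
    }],
}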
samsam2610/Bone-Project
https://github.com/samsam2610/Bone-Project
26b365ca91e3d1d75e1b9f9259a196ad5f98a833
b4ed762ce3ecf65d48512e7d1c0cda745fb1f7eb
d913aae6415129aaee26bee228e91cce6dd526ab
refs/heads/master
2020-02-19T18:38:46.354335
2020-02-09T21:40:55
2020-02-09T21:40:55
126,262,856
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.511081874370575, "alphanum_fraction": 0.5448910593986511, "avg_line_length": 33.77777862548828, "blob_id": "1554ab3876a7e7602416b909abe2d8bc688dd127", "content_id": "0df3c7e77fb9c7cd520ac7b5be69d6c526372fee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5324, "license_type": "no_license", "max_line_length": 94, "num_lines": 153, "path": "/Python/pca.py", "repo_name": "samsam2610/Bone-Project", "src_encoding": "UTF-8", "text": "import scipy.io as sco\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA as sklearnPCA\nimport xlrd\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.patches import Ellipse\n\n\ndef hexbin(x, y, color, **kwargs):\n cmap = sns.light_palette(color, as_cmap=True)\n plt.hexbin(x, y, gridsize=10 ^ -3, cmap=cmap, **kwargs)\n\n\ndef get_cmap(n, name='hsv'):\n return plt.cm.get_cmap(name, n)\n\n\ndef get_cov_ellipse(cov, cmap, ax1, ax2, ax3):\n # Find and sort eigenvalues and eigenvectors into descending order\n eigvals, eigvecs = np.linalg.eigh(cov)\n order = eigvals.argsort()[::-1]\n eigvals, eigvecs = eigvals[order], eigvecs[:, order]\n evec1, evec2, evec3 = eigvecs[:, order]\n x_v1, y_v1, z_v1 = evec1 # Eigenvector with largest eigenvalue\n x_v2, y_v2, z_v2 = evec2\n x_v3, y_v3, z_v3 = evec3\n\n scale = 20\n ax1.plot([x_v1 * -scale, x_v1 * scale],\n [y_v1 * -scale, y_v1 * scale],\n [z_v1 * -scale, z_v1 * scale], c=cmap)\n\n ax2.plot([x_v2 * -scale, x_v2 * scale],\n [y_v2 * -scale, y_v2 * scale],\n [z_v2 * -scale, z_v2 * scale], c=cmap)\n ax3.plot([x_v3 * -scale, x_v3 * scale],\n [y_v3 * -scale, y_v3 * scale],\n [z_v3 * -scale, z_v3 * scale], c=cmap)\n\n\nN = 86\nmat = sco.loadmat(\"/Users/Sam/Dropbox/Data/phantom_clean_data.mat\")\nname_data = open(\"/Users/Sam/Dropbox/Data/phantom_name_data.txt\", mode = 'r')\nname = name_data.readline().split(',')\n\nclean_data = np.array(mat['cleanData'])\ndel mat\n\n\nx1 = np.zeros([1, 1])\ny1 = np.zeros([1, 1])\nz1 = np.zeros([1, 1])\nx2 = np.zeros([1, 1])\ny2 = np.zeros([1, 1])\nz2 = np.zeros([1, 1])\ndata = pd.DataFrame()\nfor i in range (0, len(name)):\n fig, axs = plt.subplots(nrows=2, ncols=3, sharex=True, sharey=True, figsize=[11, 11])\n for j in range(0, 2):\n pos = i*2 + j + 1\n test_data_std = np.delete(clean_data[:, pos * 5 - 5:pos * 5 - 1], 0, 0)\n test_data_std = test_data_std[test_data_std[:, 0] != 0, 1:4]\n test_data_std = StandardScaler().fit_transform(test_data_std)\n sklearn_pca = sklearnPCA(n_components=3)\n fit_data = sklearn_pca.fit_transform(test_data_std)\n test = fit_data[:, 0:3]\n temp_x = test[:, 0]\n temp_y = test[:, 1]\n temp_z = test[:, 2]\n if j == 0:\n x1 = np.append(x1, temp_x)\n y1 = np.append(y1, temp_y)\n z1 = np.append(z1, temp_z)\n coord = np.column_stack((temp_x, temp_y, temp_z))\n tobeAdd = pd.DataFrame(coord)\n data = pd.concat([data, tobeAdd], axis=1, ignore_index=True)\n\n ax = axs[0, 0]\n hb = ax.hexbin(temp_x, temp_y, gridsize=100, cmap=\"inferno\")\n ax.set_xlabel('1st PCA')\n ax.set_ylabel('2nd PCA')\n ax.set_xlim(-3, 3)\n ax.set_ylim(-3, 3)\n ax.set_title(\"Anterior Region - 1st and 2nd PCA\")\n cb = fig.colorbar(hb, ax=ax)\n cb.set_label(\"N\")\n\n ax = axs[0, 1]\n hb = ax.hexbin(temp_x, temp_z, gridsize=100, cmap=\"inferno\")\n ax.set_xlabel('1st PCA')\n ax.set_ylabel('3rd PCA')\n ax.set_title(\"Anterior Region - 1st and 3rd PCA\")\n cb = fig.colorbar(hb, ax=ax)\n 
cb.set_label(\"N\")\n\n ax = axs[0, 2]\n hb = ax.hexbin(temp_y, temp_z, gridsize=100, cmap=\"inferno\")\n ax.set_xlabel('2nd PCA')\n ax.set_ylabel('3rd PCA')\n ax.set_title(\"Anterior Region - 2nd and 3rd PCA\")\n cb = fig.colorbar(hb, ax=ax)\n cb.set_label(\"N\")\n else:\n x2 = np.append(x2, temp_x)\n y2 = np.append(y2, temp_y)\n z2 = np.append(z2, temp_z)\n coord = np.column_stack((temp_x, temp_y, temp_z))\n tobeAdd = pd.DataFrame(coord)\n data = pd.concat([data, tobeAdd], axis=1, ignore_index=True)\n\n ax = axs[1, 0]\n hb = ax.hexbin(temp_x, temp_y, gridsize=100, cmap=\"inferno\")\n ax.set_xlabel('1st PCA')\n ax.set_ylabel('2nd PCA')\n ax.set_title(\"Posterior Region - 1st and 2nd PCA\")\n cb = fig.colorbar(hb, ax=ax)\n cb.set_label(\"N\")\n\n ax = axs[1, 1]\n hb = ax.hexbin(temp_x, temp_z, gridsize=100, cmap=\"inferno\")\n ax.set_xlabel('1st PCA')\n ax.set_ylabel('3rd PCA')\n ax.set_title(\"Posterior Region - 1st and 3rd PCA\")\n cb = fig.colorbar(hb, ax=ax)\n cb.set_label(\"N\")\n\n ax = axs[1, 2]\n hb = ax.hexbin(temp_y, temp_z, gridsize=100, cmap=\"inferno\")\n ax.set_xlabel('2nd PCA')\n ax.set_ylabel('3rd PCA')\n ax.set_title(\"Posterior Region - 2nd and 3rd PCA\")\n cb = fig.colorbar(hb, ax=ax)\n cb.set_label(\"N\")\n\n del test_data_std, sklearn_pca, fit_data, test, temp_x, temp_y, temp_z, coord, tobeAdd\n\n\n Anterior = {\"1st PCA\": x1, \"2nd PCA\": y1, \"3rd PCA\": z1}\n AnteriorDF = pd.DataFrame(data=Anterior)\n\n # fig.tight_layout()\n\n\n plt.tight_layout()\n\n fig.suptitle(name[i], y=0.99)\n plt.subplots_adjust(top=0.95)\n\n plt.savefig('/Users/Sam/Dropbox/test_data_2/%s.png' %name[i])\n # plt.show()\n\n\n\n" }, { "alpha_fraction": 0.8153846263885498, "alphanum_fraction": 0.8153846263885498, "avg_line_length": 31.375, "blob_id": "557331d82e46bcfe70939376c3485ba23caca614", "content_id": "49895716da3ff9b2e5b50427400a53928b475901", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "no_license", "max_line_length": 62, "num_lines": 8, "path": "/Python/data_organizing.py", "repo_name": "samsam2610/Bone-Project", "src_encoding": "UTF-8", "text": "import scipy.io as sco\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA as sklearnPCA\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nmat = sco.loadmat(\"/Users/Sam/Dropbox/Data/pca/cleanData.mat\")\n\n" }, { "alpha_fraction": 0.5767317414283752, "alphanum_fraction": 0.6138538122177124, "avg_line_length": 28.35955047607422, "blob_id": "256e4d7f3f3c7106ab376ea4a05182ee362ae5c4", "content_id": "476309c915f27f5b53be1b15dc430cc7aa385d4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2613, "license_type": "no_license", "max_line_length": 91, "num_lines": 89, "path": "/Python/phantom_pca.py", "repo_name": "samsam2610/Bone-Project", "src_encoding": "UTF-8", "text": "import scipy.io as sco\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA as sklearnPCA\nimport xlrd\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nmat = sco.loadmat(\"/Users/Sam/Dropbox/Data/phantom_clean_data.mat\")\nname_data = open(\"/Users/Sam/Dropbox/Data/name_data.txt\", mode = 'r')\nname = name_data.readline().split(',')\n\nclean_data = np.array(mat['cleanData'])\ndel mat\n\n\nx1 = np.zeros([1, 1])\ny1 = np.zeros([1, 1])\nz1 = np.zeros([1, 1])\nx2 = np.zeros([1, 1])\ny2 = np.zeros([1, 1])\nz2 = 
np.zeros([1, 1])\ndata = pd.DataFrame()\nfor i in range (0, len(name)):\n fig, axs = plt.subplots(nrows=1, ncols=3, sharex=False, sharey=False, figsize=[22, 11])\n pos = i\n test_data_std = np.delete(clean_data[:, pos * 5 - 5:pos * 5 - 1], 0, 0)\n test_data_std = test_data_std[test_data_std[:, 0] != 0, 1:4]\n test_data_std = StandardScaler().fit_transform(test_data_std)\n sklearn_pca = sklearnPCA(n_components=3)\n fit_data = sklearn_pca.fit_transform(test_data_std)\n test = fit_data[:, 0:3]\n temp_x = test[:, 0]\n temp_y = test[:, 1]\n temp_z = test[:, 2]\n\n x1 = np.append(x1, temp_x)\n y1 = np.append(y1, temp_y)\n z1 = np.append(z1, temp_z)\n coord = np.column_stack((temp_x, temp_y, temp_z))\n tobeAdd = pd.DataFrame(coord)\n data = pd.concat([data, tobeAdd], axis=1, ignore_index=True)\n\n ax = axs[0]\n hb = ax.hexbin(temp_x, temp_y, gridsize=100, cmap=\"inferno\")\n ax.set_xlabel('1st PCA')\n ax.set_ylabel('2nd PCA')\n ax.set_xlim(-3, 3)\n ax.set_ylim(-3, 3)\n ax.set_title(\"1st and 2nd PCA\")\n cb = fig.colorbar(hb, ax=ax)\n cb.set_label(\"N\")\n\n ax = axs[1]\n hb = ax.hexbin(temp_x, temp_z, gridsize=100, cmap=\"inferno\")\n ax.set_xlabel('1st PCA')\n ax.set_ylabel('3rd PCA')\n ax.set_xlim(-3, 3)\n ax.set_ylim(-3, 3)\n ax.set_title(\"1st and 3rd PCA\")\n cb = fig.colorbar(hb, ax=ax)\n cb.set_label(\"N\")\n\n ax = axs[2]\n hb = ax.hexbin(temp_y, temp_z, gridsize=100, cmap=\"inferno\")\n ax.set_xlabel('2nd PCA')\n ax.set_ylabel('3rd PCA')\n ax.set_xlim(-3, 3)\n ax.set_ylim(-3, 3)\n ax.set_title(\"2nd and 3rd PCA\")\n cb = fig.colorbar(hb, ax=ax)\n cb.set_label(\"N\")\n\n del test_data_std, sklearn_pca, fit_data, test, temp_x, temp_y, temp_z, coord, tobeAdd\n\n\n Anterior = {\"1st PCA\": x1, \"2nd PCA\": y1, \"3rd PCA\": z1}\n AnteriorDF = pd.DataFrame(data=Anterior)\n\n fig.tight_layout()\n\n\n plt.tight_layout()\n\n fig.suptitle(name[i], y=0.99)\n plt.subplots_adjust(top=0.95)\n\n plt.savefig('/Users/Sam/Dropbox/test_data_2/%s.png' %name[i])\n # plt.show()\n" }, { "alpha_fraction": 0.7540341019630432, "alphanum_fraction": 0.759566605091095, "avg_line_length": 44.17708206176758, "blob_id": "b8e9f871d84be611de08efe5fcc26de029ba90a0", "content_id": "cb870a357bdaf40f104048e3073d50df7fa895bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8676, "license_type": "no_license", "max_line_length": 106, "num_lines": 192, "path": "/Python/SegmentationThreshold.py", "repo_name": "samsam2610/Bone-Project", "src_encoding": "UTF-8", "text": "from __main__ import vtk, qt, ctk, slicer\n\n#\n# HelloLaplace\n#\n\nclass SegmentationThreshold:\n def __init__(self, parent):\n parent.title = \"Segmentations by thresholding\"\n parent.categories = [\"Examples\"]\n parent.dependencies = []\n parent.contributors = [\"Sam Tran\"] # replace with \"Firstname Lastname (Org)\"\n parent.helpText = \"\"\"\n Example of scripted loadable extension for the HelloLaplace tutorial.\n \"\"\"\n parent.acknowledgementText = \"\"\n # replace with organization, grant and thanks.\n self.parent = parent\n\n#\n# qHelloPythonWidget\n#\n\nclass SegmentationThresholdWidget:\n def __init__(self, parent = None):\n if not parent:\n self.parent = slicer.qMRMLWidget()\n self.parent.setLayout(qt.QVBoxLayout())\n self.parent.setMRMLScene(slicer.mrmlScene)\n else:\n self.parent = parent\n self.layout = self.parent.layout()\n if not parent:\n self.setup()\n self.parent.show()\n\n def setup(self):\n # Collapsible button\n self.segmentationCollapsibleButton = ctk.ctkCollapsibleButton()\n 
self.segmentationCollapsibleButton.text = \"Segmentation Operator\"\n self.layout.addWidget(self.segmentationCollapsibleButton)\n\n # Layout within the laplace collapsible button\n self.laplaceFormLayout = qt.QFormLayout(self.segmentationCollapsibleButton)\n\n # the volume selectors\n self.inputFrame = qt.QFrame(self.segmentationCollapsibleButton)\n self.inputFrame.setLayout(qt.QHBoxLayout())\n self.laplaceFormLayout.addWidget(self.inputFrame)\n self.inputSelector = qt.QLabel(\"Input Volume: \", self.inputFrame)\n self.inputFrame.layout().addWidget(self.inputSelector)\n self.inputSelector = slicer.qMRMLNodeComboBox(self.inputFrame)\n self.inputSelector.nodeTypes = ( (\"vtkMRMLScalarVolumeNode\"), \"\" )\n self.inputSelector.addEnabled = False\n self.inputSelector.removeEnabled = False\n self.inputSelector.setMRMLScene( slicer.mrmlScene )\n self.inputFrame.layout().addWidget(self.inputSelector)\n\n self.outputFrame = qt.QFrame(self.segmentationCollapsibleButton)\n self.outputFrame.setLayout(qt.QHBoxLayout())\n self.laplaceFormLayout.addWidget(self.outputFrame)\n self.outputSelector = qt.QLabel(\"Output Volume: \", self.outputFrame)\n self.outputFrame.layout().addWidget(self.outputSelector)\n self.outputSelector = slicer.qMRMLNodeComboBox(self.outputFrame)\n self.outputSelector.nodeTypes = ( (\"vtkMRMLScalarVolumeNode\"), \"\" )\n self.outputSelector.setMRMLScene( slicer.mrmlScene )\n self.outputFrame.layout().addWidget(self.outputSelector)\n\n self.thresholdSliderLabel = qt.QFrame(self.segmentationCollapsibleButton)\n self.thresholdSliderLabel.setLayout(qt.QHBoxLayout())\n self.thresholdSliderLabel = qt.QLabel(\"Threshold Range:\")\n self.thresholdSliderLabel.setToolTip(\"Set the range of the background values that should be labeled.\")\n self.outputFrame.layout().addWidget(self.outputSelector)\n # self.scriptedEffect.addOptionsWidget(self.thresholdSliderLabel)\n\n # self.thresholdSlider = ctk.ctkRangeWidget()\n # self.thresholdSlider.spinBoxAlignment = qt.Qt.AlignTop\n # self.thresholdSlider.singleStep = 0.01\n # self.scriptedEffect.addOptionsWidget(self.thresholdSlider)\n\n # Apply total segmentation button\n segmentationTotalButton = qt.QPushButton(\"Apply Total Segmentations\")\n segmentationTotalButton.toolTip = \"Run the Total Segmentation Operator.\"\n self.laplaceFormLayout.addWidget(segmentationTotalButton)\n segmentationTotalButton.connect('clicked(bool)', self.onTotalApply)\n\n # Apply group segmentation button\n segmentationGroupButton = qt.QPushButton(\"Apply Group Segmentations\")\n segmentationGroupButton.toolTip = \"Run the Group Segmentation Operator.\"\n self.laplaceFormLayout.addWidget(segmentationGroupButton)\n segmentationGroupButton.connect('clicked(bool)', self.onGroupApply)\n\n\n # Add vertical spacer\n self.layout.addStretch(1)\n\n # Set local var as instance attribute\n self.segmentationTotalButton = segmentationTotalButton\n self.segmentationGroupButton = segmentationGroupButton\n\n def segmentationTotalBone(self, inputVolume):\n # masterVolumeNode = slicer.util.getNode(inputVolume)\n masterVolumeNode = inputVolume\n thresholdStep = 10\n anatomicalUnits = [[\"Plate\", 150, 200],[\"Rod\", 201, 254],[\"Bone\", 255, 255]]\n for anatomicalUnit, thresholdMin, thresholdMax in anatomicalUnits:\n # Create segmentation\n segmentationNode = slicer.mrmlScene.AddNewNodeByClass(\"vtkMRMLSegmentationNode\", anatomicalUnit)\n segmentationNode.CreateDefaultDisplayNodes() # only needed for display\n 
segmentationNode.SetReferenceImageGeometryParameterFromVolumeNode(masterVolumeNode)\n\n # Create temporary segment editor to get access to effects\n segmentEditorWidget = slicer.qMRMLSegmentEditorWidget()\n segmentEditorWidget.setMRMLScene(slicer.mrmlScene)\n segmentEditorNode = slicer.mrmlScene.AddNewNodeByClass(\"vtkMRMLSegmentEditorNode\")\n segmentEditorWidget.setMRMLSegmentEditorNode(segmentEditorNode)\n segmentEditorWidget.setSegmentationNode(segmentationNode)\n segmentEditorWidget.setMasterVolumeNode(masterVolumeNode)\n\n thresholdRange = list(range(thresholdMin, thresholdMax + 1, thresholdStep));\n\n if max(thresholdRange) < thresholdMax:\n thresholdRange.append(thresholdMax)\n elif min(thresholdRange) == max(thresholdRange):\n thresholdRange.append(thresholdMax)\n\n # Create segments by thresholding grayscale range \n for thresholdMinLocal, thresholdMaxLocal in zip(thresholdRange, thresholdRange[1:]):\n addedSegmentID = segmentationNode.GetSegmentation().AddEmptySegment()\n segmentEditorNode.SetSelectedSegmentID(addedSegmentID)\n # Fill by thresholding\n segmentEditorWidget.setActiveEffectByName(\"Threshold\")\n effect = segmentEditorWidget.activeEffect()\n effect.setParameter(\"MinimumThreshold\",str(thresholdMinLocal))\n effect.setParameter(\"MaximumThreshold\",str(thresholdMaxLocal))\n effect.self().onApply()\n\n # Delete temporary segment editor\n segmentEditorWidget = None\n slicer.mrmlScene.RemoveNode(segmentEditorNode)\n\n\n def segmentationGroupBone(self, inputVolume):\n # masterVolumeNode = slicer.util.getNode(inputVolume)\n masterVolumeNode = inputVolume\n thresholdStep = 10\n # Create segmentation\n segmentationNode = slicer.mrmlScene.AddNewNodeByClass(\"vtkMRMLSegmentationNode\", \"Full Sample\")\n segmentationNode.CreateDefaultDisplayNodes() # only needed for display\n segmentationNode.SetReferenceImageGeometryParameterFromVolumeNode(masterVolumeNode)\n\n # Create temporary segment editor to get access to effects\n segmentEditorWidget = slicer.qMRMLSegmentEditorWidget()\n segmentEditorWidget.setMRMLScene(slicer.mrmlScene)\n segmentEditorNode = slicer.mrmlScene.AddNewNodeByClass(\"vtkMRMLSegmentEditorNode\")\n segmentEditorWidget.setMRMLSegmentEditorNode(segmentEditorNode)\n segmentEditorWidget.setSegmentationNode(segmentationNode)\n segmentEditorWidget.setMasterVolumeNode(masterVolumeNode)\n\n # Create segments by thresholding grayscale range\n anatomicalUnits = [[\"Plate\", 150, 200],[\"Rod\", 201, 254],[\"Bone\", 255, 255]] \n for anatomicalUnit, thresholdMin, thresholdMax in anatomicalUnits:\n addedSegmentID = segmentationNode.GetSegmentation().AddEmptySegment(anatomicalUnit)\n segmentEditorNode.SetSelectedSegmentID(addedSegmentID)\n # Fill by thresholding\n segmentEditorWidget.setActiveEffectByName(\"Threshold\")\n effect = segmentEditorWidget.activeEffect()\n effect.setParameter(\"MinimumThreshold\",str(thresholdMin))\n effect.setParameter(\"MaximumThreshold\",str(thresholdMax))\n effect.self().onApply()\n\n\n # Delete temporary segment editor\n segmentEditorWidget = None\n slicer.mrmlScene.RemoveNode(segmentEditorNode)\n\n\n def onGroupApply(self):\n inputVolume = self.inputSelector.currentNode()\n outputVolume = self.outputSelector.currentNode()\n self.segmentationGroupBone(inputVolume)\n selectionNode = slicer.app.applicationLogic().GetSelectionNode()\n selectionNode.SetReferenceActiveVolumeID(outputVolume.GetID())\n slicer.app.applicationLogic().PropagateVolumeSelection(0)\n\n def onTotalApply(self):\n inputVolume = self.inputSelector.currentNode()\n 
outputVolume = self.outputSelector.currentNode()\n self.segmentationTotalBone(inputVolume)\n selectionNode = slicer.app.applicationLogic().GetSelectionNode()\n selectionNode.SetReferenceActiveVolumeID(outputVolume.GetID())\n slicer.app.applicationLogic().PropagateVolumeSelection(0)\n\n\n" } ]
4
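One detail in pca.py above deserves a note: numpy.linalg.eigh returns eigenvectors as the columns of its second result, so unpacking eigvecs[:, order] row by row, as get_cov_ellipse does, yields matrix rows rather than eigenvectors (and applies the sort twice). A minimal sketch of the column-wise convention, independent of the project's data:

import numpy as np

pts = np.random.rand(100, 3)                   # any 3-D point cloud
cov = np.cov(pts.T)                            # 3x3 covariance matrix
eigvals, eigvecs = np.linalg.eigh(cov)
order = eigvals.argsort()[::-1]                # sort into descending order once
eigvals, eigvecs = eigvals[order], eigvecs[:, order]
evec1, evec2, evec3 = eigvecs.T                # rows of the transpose are the sorted eigenvectors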
frenzy382/m3u2strm
https://github.com/frenzy382/m3u2strm
6ac8716ee294d052b1639af748330ae3f9614e3e
1ecd938e7d08b74da72e24c74e0929959243d9e9
57acba5a075b6bdbbde1efc117d1803e99b50acf
refs/heads/master
2022-04-10T07:33:35.902962
2020-03-18T19:19:10
2020-03-18T19:19:10
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6074665784835815, "alphanum_fraction": 0.6277462244033813, "avg_line_length": 36.40804672241211, "blob_id": "a71266c6d8996b7daa02596950ed17b794e1245d", "content_id": "f982116ff0eb2200502404e2571461bfd322333a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6509, "license_type": "permissive", "max_line_length": 124, "num_lines": 174, "path": "/m3u2strm.py", "repo_name": "frenzy382/m3u2strm", "src_encoding": "UTF-8", "text": "# This Python file uses the following encoding: utf-8\n'''\nThis is a python script to parse an m3u file that has a list of VOD media, and create a folder structure like follows:\n\n/workspace folder\n /groupname\n /title(year)\n /resolution\n title.strm\n\ntitle.strm is a text file which has the URL to the stream inside of it\n\nthe strm files can then be used in your emby media server as defined here:\nhttps://support.emby.media/support/solutions/articles/44001159147-strm-files\nadditional reference material here:\nhttps://emby.media/community/index.php?/topic/674-media-files-folders-structure/\nhttps://support.emby.media/support/solutions/articles/44001159102-movie-naming\nhttps://support.emby.media/support/solutions/articles/44001159319-library-setup\n\nI plan to do some more work to this at some point, but i just needed something quick n dirty. \n\nChange the m3u file name to the m3u file you want to use, and root directory is relative to\nthe working directory which the python script is executed in ...\n\n'''\n\nimport os\n#this should be the name of your m3u and in the same directory as this python file.\nm3ufile = \"liveTV.2020.03.12 (1).m3u\"\n#root directory should be created already, it is where the group folders will be located. 
relative to the working directory.\nrootdirectory = \"strms\"\n#create the strms directory if it does not exist:\nif not os.path.exists(rootdirectory):\n os.mkdir(rootdirectory)\n print('Created Streams Directory:', rootdirectory)\nelse:\n print('Streams Directory Found', rootdirectory)\nm3ulist = open(m3ufile,\"r\")\nstreamlist = m3ulist.read()\n#parse the file into an array of streams\nstreams = streamlist.split(\"#EXTINF:0 \")\n#delete the first element of the streams array since it is identifying the m3u file\ndel streams[0]\nmediadictionary = {}\nprint(\"streams length\", len(streams))\n#iterate over all the streams and parse the information for each content into a list, then put that list into the dictionary\nfor i in range(len(streams)):\n#for i in range(50):\n stream = []\n lines = streams[i].split(\"\\n\")\n if i+1 != len(streams):\n del lines[3]\n #parse the first line to get the resolution, title, and year\n resolutionandtitle = lines[0].split(\",\")[1].split(\" : \")\n title = ' '.join(resolutionandtitle[1].split(\" \")[:-1])\n stream.append(title)\n \n resolution = resolutionandtitle[0]\n stream.append(resolution)\n\n year = resolutionandtitle[1].split(\" \")[-1]\n stream.append(year)\n\n #get the group name from line 2\n group = lines[1].split(\"#EXTGRP:\")[1]\n stream.append(group)\n\n #get the URL to the stream from line 3\n link = lines[2]\n stream.append(link)\n #add the stream to the dictionary\n mediadictionary[i] = stream\n\n'''\nfor media in mediadictionary:\n print(media, mediadictionary[media])\n'''\nfor i in range(len(mediadictionary)):\n md = mediadictionary[i]\n print(md)\n groupdirectory = '/'.join((rootdirectory,md[3]))\n resolution = \"\"\n if md[1] == \"HD\":\n resolution = \"720p\"\n elif md[1] == \"SD\":\n resolution = \"480p\"\n else:\n resolution = md[1]\n if md[2] == \"\":\n md[2] = \"null\"\n if not os.path.exists(groupdirectory):\n os.mkdir(groupdirectory)\n print('Created Group Directory:', groupdirectory)\n else:\n print('Group Directory Found', groupdirectory)\n if md[3] == \"Movie VOD\":\n moviewithyear = (md[0] + \" (\" + md[2] + \")\")\n moviedirectory = '/'.join((groupdirectory,moviewithyear))\n filename = moviedirectory + \"/\" + (\" - \".join((moviewithyear, resolution))) + \".strm\"\n if not os.path.exists(moviedirectory):\n os.mkdir(moviedirectory)\n print('Created Movie Directory:', moviedirectory)\n else:\n print('Movie Directory Found', moviedirectory)\n if not os.path.exists(filename):\n streamfile = open(filename, \"w+\")\n streamfile.write(md[4])\n streamfile.close\n print(\"strm file created:\", filename)\n streamfile.close()\n else:\n print(\"stream file already found\")\n else:\n showdirectory = \"\"\n title = \"\"\n date = \"\"\n if list(md[2])[0] == \"S\" and list(md[2])[3] == \"E\":\n showdirectory = '/'.join((groupdirectory,md[0]))\n showwithepisode = (md[0] + \" (\" + md[2] + \")\")\n filename = showdirectory + \"/\" + (\" - \".join((showwithepisode, resolution))) + \".strm\"\n elif list(md[2])[0] == \"S\":\n showdirectory = '/'.join((groupdirectory,md[0]))\n showwithepisode = (md[0] + \" (\" + md[2] + \")\")\n filename = showdirectory + \"/\" + (\" - \".join((showwithepisode, resolution))) + \".strm\"\n else:\n titlestring = md[0].split(\" \")\n title = \"\"\n date = \"\"\n vset = False\n for i in range(len(titlestring)):\n if vset == True:\n break\n try:\n int(titlestring[i])\n except:\n if i+1 == len(titlestring):\n showdirectory = '/'.join((groupdirectory,md[0]))\n filename = showdirectory + \"/\" + (\" - -\".join((md[0], 
resolution))) + \".strm\"\n continue\n else:\n try:\n int(titlestring[i+1])\n except:\n showdirectory = '/'.join((groupdirectory,md[0]))\n filename = showdirectory + \"/\" + (\" - -\".join((md[0], resolution))) + \".strm\"\n else:\n if 1900 < int(titlestring[i]) < 2025 and 0 < int(titlestring[i+1]) <= 12 and 0 < int(titlestring[i+2]) <= 31:\n date = \"-\".join((titlestring[i], titlestring[i+1], titlestring[i+2]))\n title = \" \".join(titlestring[:i])\n showdirectory = '/'.join((groupdirectory,title))\n filename = showdirectory + \"/\" + (\"-\".join((title,date))) + \" - \" + resolution + \".strm\"\n vset = True\n elif 0 < int(titlestring[i]):\n title = \" \".join(titlestring[:i])\n showdirectory = '/'.join((groupdirectory,title))\n filename = showdirectory + \"/\" + (\" - \".join((md[0], resolution))) + \".strm\"\n vset = True\n else:\n showdirectory = '/'.join((groupdirectory,md[0]))\n filename = showdirectory + \"/\" + (\" - -\".join((md[0], resolution))) + \".strm\"\n vset = True\n if not os.path.exists(showdirectory):\n os.mkdir(showdirectory)\n print('Created show Directory:', showdirectory)\n else:\n print('Show Directory Found', showdirectory)\n if not os.path.exists(filename):\n streamfile = open(filename, \"w+\")\n streamfile.write(md[4])\n streamfile.close\n print(\"strm file created:\", filename)\n streamfile.close()\n else:\n print(\"stream file already found\")\n" }, { "alpha_fraction": 0.6274623274803162, "alphanum_fraction": 0.646388590335846, "avg_line_length": 33.06578826904297, "blob_id": "92fda75e55bc958b983de32765deaffcd845fc0e", "content_id": "c202a10f52e90fccf2f66a23177ae8b5a13a64e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5178, "license_type": "permissive", "max_line_length": 130, "num_lines": 152, "path": "/m3u2strm-2.py", "repo_name": "frenzy382/m3u2strm", "src_encoding": "UTF-8", "text": "'''\nThis is a python script to parse an m3u file that has a list of VOD media, and create a folder structure like follows:\n\n/workspace folder\n /groupname\n /title(year)\n /resolution\n title.strm\n\ntitle.strm is a text file which has the URL to the stream inside of it\n\nthe strm files can then be used in your emby media server as defined here:\nhttps://support.emby.media/support/solutions/articles/44001159147-strm-files\nadditional reference material here:\nhttps://emby.media/community/index.php?/topic/674-media-files-folders-structure/\nhttps://support.emby.media/support/solutions/articles/44001159102-movie-naming\nhttps://support.emby.media/support/solutions/articles/44001159319-library-setup\n\nI plan to do some more work to this at some point, but i just needed something quick n dirty. \n\nChange the m3u file name to the m3u file you want to use, and root directory is relative to\nthe working directory which the python script is executed in ...\n'''\nimport os\n#this should be the name of your m3u and in the same directory as this python file.\nm3ufile = \"test3.m3u\"\n#root directory should be created already, it is where the group folders will be located. 
relative to the working directory.\nrootdirectory = \"strms\"\n\nm3ulist = open(m3ufile,\"r\")\nstreamlist = m3ulist.read()\nmediadictionary = {}\n#parse the file into an array of streams\nstreams = streamlist.split(\"#EXTINF:\")\ndel streams[0]\n#delete the first element of the streams array since it is identifying the m3u file\nfor i in range(len(streams)):\n#for i in range(50):\n stream = []\n case = \"\"\n lines = streams[i].split(\"\\n\")\n if i+1 != len(streams):\n del lines[2]\n '''for line in lines:\n print(line, \"lon\", i)'''\n line1 = lines[0].split(\",\")\n del line1[0]\n info = line1[0].split(\"|\")\n stream.append(info[1]) #language\n if len(info) > 3:\n case = \"tv\"\n else:\n case = \"movie\"\n #for movie the stream structure is [language, title, streamlink]\n if case == \"movie\":\n title = info[2].split(\" \")\n del title[0]\n del title[-1]\n stream.append(\"Movie\")\n stream.append(' '.join(title))\n stream.append(lines[1])\n \n #for tv the stream structure is [language, seriesname, seriesnumber, episodenumber, episodetitle]\n if case == \"tv\":\n stream.append(\"TV\")\n seriesinfo = info[2].split(\" \")\n withepisodename = False\n del seriesinfo[0]\n del seriesinfo[-1]\n if seriesinfo[-1][0] == \"S\":\n stream.append(\" \".join(seriesinfo[0:-1]))\n stream.append(\"\".join([seriesinfo[-1][1], seriesinfo[-1][2]]))\n else:\n stream.append(\"\".join([seriesinfo[0:-1]]))\n stream.append(\"\")\n episodeinfo = info[-1].split(\" \")\n del episodeinfo[0]\n for j in range(len(episodeinfo)):\n try:\n int(episodeinfo[j][0])\n int(episodeinfo[j][1])\n int(episodeinfo[j][3])\n int(episodeinfo[j][4])\n except:\n if episodeinfo[j] == \"-\":\n withepisodename = True\n else:\n if withepisodename == True:\n continue\n else:\n withepisodename = False\n else:\n stream.append(\"\".join([episodeinfo[j][3], episodeinfo[j][4]]))\n if withepisodename == True:\n episode = list(info[-1].split(\"-\")[-1])\n del episode[0]\n stream.append(\"\".join(episode))\n else:\n stream.append(\"\")\n stream.append(lines[1])\n mediadictionary[i] = stream\nfor i in range(len(mediadictionary)):\n md = mediadictionary[i]\n #print(md)\n typedirectory = md[1]\n language = md[0]\n url = md[-1]\n if not os.path.exists(typedirectory):\n os.mkdir(typedirectory)\n print('Created Type Directory:', typedirectory)\n else:\n print('Type Directory Found', typedirectory)\n if typedirectory == \"Movie\":\n title = md[2]\n moviedirectory = '/'.join((typedirectory,title))\n filename = moviedirectory + \"/\" + title + \" - [\" + language + \"]\" + \".strm\"\n print(filename)\n if not os.path.exists(moviedirectory):\n os.mkdir(moviedirectory)\n print('Created Movie Directory:', moviedirectory)\n else:\n print('Movie Directory Found', moviedirectory)\n if not os.path.exists(filename):\n streamfile = open(filename, \"w+\")\n streamfile.write(url)\n streamfile.close\n print(\"strm file created:\", filename)\n streamfile.close()\n else:\n print(\"stream file already found\")\n else:\n seriesname = md[2]\n seriesandepisode = \"S\" + md[3] + \"E\" + md[4]\n showdirectory = typedirectory + \"/\" + seriesname\n if md[5] != \"\":\n episodename = md[5]\n filename = showdirectory + \"/\" + \" - \".join((seriesname, seriesandepisode, episodename)) + \" - [\" + language + \"]\" + \".strm\"\n else:\n filename = showdirectory + \"/\" + \" - \".join((seriesname, seriesandepisode)) + \" - [\" + language + \"]\" + \".strm\"\n if not os.path.exists(showdirectory):\n os.mkdir(showdirectory)\n print('Show Movie Directory:', showdirectory)\n else:\n 
print('Show Directory Found', showdirectory)\n        if not os.path.exists(filename):\n            streamfile = open(filename, \"w+\")\n            streamfile.write(url)\n            streamfile.close()\n            print(\"strm file created:\", filename)\n        else:\n            print(\"stream file already found\")\n" } ]
2
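The docstring in m3u2strm.py above defines the .strm contract: a plain text file whose entire content is the stream URL, filed under group and title folders. A minimal sketch of that contract; the helper name and sample arguments are invented, and exist_ok assumes Python 3 (the scripts above guard with os.path.exists instead):

import os

def write_strm(root, group, title, url):
    folder = os.path.join(root, group, title)
    os.makedirs(folder, exist_ok=True)
    with open(os.path.join(folder, title + ".strm"), "w") as strm:
        strm.write(url)                        # the URL is the file's only content

write_strm("strms", "Movie VOD", "Example Movie (1999)", "http://host/stream/42")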
PedroPenaUSF/StopwatchKattis
https://github.com/PedroPenaUSF/StopwatchKattis
f1ec22143812913fca28df2dc3dacfe4451db61f
088cdfc040e1b5a2684be0d6ae0bb1b720cfdd66
9196b8275cbc8d02f3c643d94390c8131f828011
refs/heads/master
2023-07-25T14:00:30.621121
2021-09-10T11:57:34
2021-09-10T11:57:34
405,064,443
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.3769230842590332, "alphanum_fraction": 0.4000000059604645, "avg_line_length": 16.33333396911621, "blob_id": "a86dd6987ff427ad15752a36ece996daaf7c09a4", "content_id": "a1bca5fb3e8ec5356d2ee1b26571ea493679ced1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "no_license", "max_line_length": 27, "num_lines": 15, "path": "/main.py", "repo_name": "PedroPenaUSF/StopwatchKattis", "src_encoding": "UTF-8", "text": "n = int(input())\ni = 0\ntime = 0\nif n % 2 != 0:\n while i < n:\n input()\n i = i + 1\n print('still running')\nelse:\n while i < n:\n a = int(input())\n b = int(input())\n time = time + b - a\n i = i + 2\n print(time)\n" } ]
1
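main.py above treats consecutive press times as (start, stop) pairs when the count is even. The same arithmetic on an invented sample, as a compact cross-check:

presses = [1, 5, 10, 12]                       # even count: two start/stop pairs
total = sum(stop - start for start, stop in zip(presses[::2], presses[1::2]))
print(total)                                   # (5 - 1) + (12 - 10) = 6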
Fatemeh-Zabihollahy/3DLGE_Scar_XYZ
https://github.com/Fatemeh-Zabihollahy/3DLGE_Scar_XYZ
fa2f0959477c65fc0e81cbcf9fa19e0c3a78746d
355799855fd72370c4a001972a4a4fa4c2a97cb0
646516b1bf369d04094f0858a493c1fa5e879ede
refs/heads/master
2022-07-16T14:40:36.317439
2020-05-13T13:44:28
2020-05-13T13:44:28
263,144,920
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5346862077713013, "alphanum_fraction": 0.5506200790405273, "avg_line_length": 32.369510650634766, "blob_id": "1bf16fb5770ff431610c19020ae4f811a09d3b39", "content_id": "905b90c13e384211f6809d8705cd12af58cc0329", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13305, "license_type": "no_license", "max_line_length": 125, "num_lines": 387, "path": "/LGE_Myo_Scar_XYZ.py", "repo_name": "Fatemeh-Zabihollahy/3DLGE_Scar_XYZ", "src_encoding": "UTF-8", "text": "#%% Import required libraries\r\n\r\nimport numpy\r\nfrom PIL import Image\r\nfrom sklearn.metrics import f1_score, accuracy_score, precision_score, recall_score\r\nimport scipy\r\nfrom skimage import morphology\r\nfrom keras.models import Model, load_model\r\nfrom keras.layers.core import Dropout\r\nfrom keras.layers.merge import concatenate\r\nfrom keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping\r\nimport nibabel as nib\r\nimport glob\r\nfrom matplotlib import pyplot as plt\r\n\r\npath1 = r'Please provide the path where the 3D LGE CMRIs in the .nii format are located.'\r\nLGEs = glob.glob(path1 + \"/*\")\r\n\r\npath2 = r'Please provide the path where the myocardial masks created from our algorithm are located.'\r\nMYOs = glob.glob(path2 + \"/*\")\r\n\r\npath3 = r'Please provide the path where the ground truth of scar tissue in the .nii format are located.'\r\nSCARs = glob.glob(path3 + \"/*\")\r\n\r\n#%\r\nsmooth = 1.\r\ndef dice_coef(y_true, y_pred):\r\n y_true_f = K.flatten(y_true)\r\n y_pred_f = K.flatten(y_pred)\r\n intersection = K.sum(y_true_f * y_pred_f)\r\n return (2. 
* intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\r\n\r\ndef dice_coef_loss(y_true, y_pred):\r\n return -dice_coef(y_true, y_pred)\r\n#%\r\nscar_xy_model = load_model('segment_scar_xy.hdf5', custom_objects={'dice_coef': dice_coef,'dice_coef_loss': dice_coef_loss})\r\nscar_xz_model = load_model('segment_scar_xz.hdf5', custom_objects={'dice_coef': dice_coef,'dice_coef_loss': dice_coef_loss}) \r\nscar_yz_model = load_model('segment_scar_yz.hdf5', custom_objects={'dice_coef': dice_coef,'dice_coef_loss': dice_coef_loss}) \r\n#%%\r\n\r\n\r\ndef myo_segment(img_test):\r\n myo_pred = scar_xy_model.predict(img_test, batch_size=1, verbose=1)\r\n myo_pred = myo_pred.reshape(x_unet, y_unet)\r\n myo_pred = (myo_pred > 0.5).astype(numpy.uint8)\r\n \r\n myo_clean = numpy.array(myo_pred, bool)\r\n myo_clean = morphology.remove_small_objects(myo_clean,100) \r\n myo_clean = myo_clean*1 \r\n \r\n myo_clean = scipy.ndimage.morphology.binary_dilation(myo_clean, iterations=3)\r\n myo_clean = scipy.ndimage.morphology.binary_erosion(myo_clean)\r\n return(myo_clean)\r\n \r\n\r\ndef model_xy_evaluate(data,mask):\r\n \r\n \r\n for k in range(len(data)):\r\n mask_sample = mask[k,:,:,:]\r\n mask_sample = mask_sample.reshape(x_unet, y_unet)\r\n\r\n img_test = data[k,:, :, :]\r\n img_test = img_test.reshape(1, x_unet, y_unet, 1)\r\n \r\n myo_clean = myo_segment(img_test)\r\n myo_clean = myo_clean[:x, :y]\r\n \r\n \r\n mask_clean = scipy.ndimage.morphology.binary_dilation(mask_sample, iterations=2)\r\n mask_clean = scipy.ndimage.morphology.binary_erosion(mask_clean)\r\n mask_clean = mask_clean*1\r\n mask_clean = mask_clean[:x, :y]\r\n \r\n myo_xy[:,:,k] = myo_clean\r\n \r\n \r\n \r\n return()\r\n \r\n \r\n \r\ndef model_xz_evaluate(data,mask):\r\n \r\n for k in range(len(data)):\r\n mask_sample = mask[k,:,:,:]\r\n mask_sample = mask_sample.reshape(x_unet, y_unet)\r\n img_test = data[k,:, :, :]\r\n img_test = img_test.reshape(1, x_unet, y_unet, 1)\r\n img_pred = scar_xz_model.predict(img_test, batch_size=1, verbose=1)\r\n img_pred = img_pred.reshape(x_unet, y_unet)\r\n img_pred = (img_pred > 0.5).astype(numpy.uint8)\r\n \r\n seg_clean = numpy.array(img_pred, bool)\r\n seg_clean = morphology.remove_small_objects(seg_clean,100) \r\n seg_clean = seg_clean*1 \r\n \r\n seg_clean = scipy.ndimage.morphology.binary_dilation(seg_clean, iterations=3)\r\n seg_clean = scipy.ndimage.morphology.binary_erosion(seg_clean)\r\n seg_clean = seg_clean*1 \r\n seg_clean = seg_clean[:x, :z]\r\n \r\n \r\n myo_xz[:,k,:] = seg_clean\r\n \r\n \r\n return() \r\n \r\n\r\n \r\ndef model_yz_evaluate(data,mask):\r\n \r\n \r\n for k in range(len(data)):\r\n mask_sample = mask[k,:,:,:]\r\n mask_sample = mask_sample.reshape(x_unet, y_unet)\r\n img_test = data[k,:, :, :]\r\n img_test = img_test.reshape(1, x_unet, y_unet, 1)\r\n img_pred = scar_yz_model.predict(img_test, batch_size=1, verbose=1)\r\n img_pred = img_pred.reshape(x_unet, y_unet)\r\n img_pred = (img_pred > 0.5).astype(numpy.uint8)\r\n \r\n seg_clean = numpy.array(img_pred, bool)\r\n seg_clean = morphology.remove_small_objects(seg_clean,100) \r\n seg_clean = seg_clean*1 \r\n \r\n seg_clean = scipy.ndimage.morphology.binary_dilation(seg_clean, iterations=3)\r\n seg_clean = scipy.ndimage.morphology.binary_erosion(seg_clean)\r\n seg_clean = seg_clean*1 \r\n seg_clean = seg_clean[:y, :z]\r\n \r\n\r\n myo_yz[k,:,:] = seg_clean\r\n \r\n \r\n return() \r\n \r\n\r\n\r\n \r\ndef Create_XY_data(lge,sacr):\r\n \r\n \r\n lge_norm = numpy.zeros((x,y,z))\r\n for slice_no in range (z):\r\n 
lge_slice = lge[:, :, slice_no]\r\n for a in range (x):\r\n for b in range (y):\r\n if lge_slice[a,b] > 1000:\r\n lge_slice[a,b] = numpy.median(lge_slice)\r\n if (numpy.max(lge_slice != 0)): \r\n lge_slice = (lge_slice-lge_slice.min())/(lge_slice.max()-lge_slice.min())\r\n lge_norm[:, :, slice_no] = lge_slice\r\n\r\n\r\n data = numpy.zeros((1,x_unet*y_unet))\r\n mask_myo = numpy.zeros((1,x_unet*y_unet))\r\n \r\n \r\n x_pad = int(x_unet - x)\r\n y_pad = int(y_unet - y)\r\n \r\n for page in range(0,z): \r\n lge_slice = lge_norm[:,:,page]\r\n myo_slice = scar[:,:,page]\r\n \r\n lge_slice = numpy.pad(lge_slice, ((0, x_pad),(0, y_pad)), 'wrap')\r\n myo_slice = numpy.pad(myo_slice, ((0, x_pad),(0, y_pad)), 'wrap')\r\n \r\n lge_slice = lge_slice.reshape(1,(x_unet*y_unet))\r\n myo_slice = myo_slice.reshape(1, (x_unet*y_unet))\r\n \r\n data = numpy.vstack((data,lge_slice ))\r\n mask_myo = numpy.vstack((mask_myo,myo_slice))\r\n\r\n data = numpy.delete(data, (0), axis=0) \r\n mask_myo = numpy.delete(mask_myo, (0), axis=0) \r\n \r\n data = data.reshape(data.shape[0], x_unet, y_unet, 1)\r\n mask_myo = mask_myo.reshape(mask_myo.shape[0], x_unet, y_unet, 1)\r\n \r\n model_xy_evaluate(data,mask_myo)\r\n \r\n \r\n return()\r\n \r\n \r\n \r\ndef Create_XZ_data(lge,scar):\r\n \r\n \r\n lge_norm = numpy.zeros((x,y,z))\r\n for slice_no in range (y):\r\n lge_slice = lge[:,slice_no,:]\r\n for a in range (x):\r\n for b in range (z):\r\n if lge_slice[a,b] > 1000:\r\n lge_slice[a,b] = numpy.median(lge_slice)\r\n if (numpy.max(lge_slice != 0)): \r\n lge_slice = (lge_slice-lge_slice.min())/(lge_slice.max()-lge_slice.min())\r\n lge_norm[:,slice_no,:] = lge_slice\r\n \r\n \r\n data = numpy.zeros((1,x_unet*y_unet))\r\n mask_myo = numpy.zeros((1,x_unet*y_unet))\r\n \r\n x_pad = int(x_unet - x)\r\n y_pad = int(y_unet - z)\r\n \r\n for page in range(0,y): \r\n lge_slice = lge_norm[:,page,:]\r\n myo_slice = scar[:,page,:]\r\n \r\n lge_slice = numpy.pad(lge_slice, ((0, x_pad),(0, y_pad)), 'wrap')\r\n myo_slice = numpy.pad(myo_slice, ((0, x_pad),(0, y_pad)), 'wrap')\r\n \r\n lge_slice = lge_slice.reshape(1,(x_unet*y_unet))\r\n myo_slice = myo_slice.reshape(1, (x_unet*y_unet))\r\n \r\n data = numpy.vstack((data,lge_slice ))\r\n mask_myo = numpy.vstack((mask_myo,myo_slice))\r\n\r\n data = numpy.delete(data, (0), axis=0) \r\n mask_myo = numpy.delete(mask_myo, (0), axis=0) \r\n \r\n data = data.reshape(data.shape[0], x_unet, y_unet, 1)\r\n mask_myo = mask_myo.reshape(mask_myo.shape[0], x_unet, y_unet, 1)\r\n model_xz_evaluate(data,mask_myo)\r\n \r\n \r\n return()\r\n \r\n \r\n \r\ndef Create_YZ_data(lge,scar):\r\n \r\n lge_norm = numpy.zeros((x,y,z))\r\n for slice_no in range (x):\r\n lge_slice = lge[slice_no,:,:]\r\n for a in range (y):\r\n for b in range (z):\r\n if lge_slice[a,b] > 1000:\r\n lge_slice[a,b] = numpy.median(lge_slice)\r\n if (numpy.max(lge_slice != 0)): \r\n lge_slice = (lge_slice-lge_slice.min())/(lge_slice.max()-lge_slice.min())\r\n lge_norm[slice_no,:,:] = lge_slice\r\n \r\n \r\n data = numpy.zeros((1,x_unet*y_unet))\r\n mask = numpy.zeros((1,x_unet*y_unet))\r\n \r\n x_pad = int(x_unet - y)\r\n y_pad = int(y_unet - z)\r\n \r\n for page in range(0,x): \r\n lge_slice = lge_norm[page,:,:]\r\n myo_slice = scar[page,:,:]\r\n \r\n lge_slice = numpy.pad(lge_slice, ((0, x_pad),(0, y_pad)), 'wrap')\r\n myo_slice = numpy.pad(myo_slice, ((0, x_pad),(0, y_pad)), 'wrap')\r\n \r\n lge_slice = lge_slice.reshape(1,(x_unet*y_unet))\r\n myo_slice = myo_slice.reshape(1, (x_unet*y_unet)) \r\n \r\n data = 
numpy.vstack((data,lge_slice ))\r\n mask = numpy.vstack((mask,myo_slice))\r\n\r\n data = numpy.delete(data, (0), axis=0) \r\n \r\n mask = numpy.delete(mask, (0), axis=0) \r\n \r\n data = data.reshape(data.shape[0], x_unet, y_unet, 1)\r\n mask = mask.reshape(mask.shape[0], x_unet, y_unet, 1)\r\n model_yz_evaluate(data,mask)\r\n \r\n \r\n return()\r\n \r\n \r\n#%% Create test dataset and test unseen images.\r\nx_unet = 256\r\ny_unet = 256\r\ndsc_total =[]\r\nacc_total = []\r\nprec_total = []\r\nrec_total = []\r\nvol_manual = []\r\nvol_seg = []\r\nsec = []\r\n\r\nfor n in range(18,34): \r\n print(n)\r\n start_time = time.time()\r\n data_lge = nib.load(LGEs[n]);\r\n lge = data_lge.get_data()\r\n x,y,z = lge.shape\r\n \r\n data_myo = nib.load(MYOs[n-18]);\r\n myo = data_myo.get_data() \r\n \r\n \r\n data_scar = nib.load(SCARs[n]);\r\n scar = data_scar.get_data() \r\n \r\n for i in range(x):\r\n for j in range(y):\r\n for k in range(z):\r\n if myo[i,j,k] == 0:\r\n lge[i,j,k] = 0\r\n \r\n img_orig = numpy.zeros((x,y,z))\r\n myo_xy = numpy.zeros((x,y,z))\r\n myo_xz = numpy.zeros((x,y,z))\r\n myo_yz = numpy.zeros((x,y,z)) \r\n \r\n \r\n Create_XY_data(lge,scar)\r\n Create_XZ_data(lge,scar) \r\n Create_YZ_data(lge,scar)\r\n \r\n \r\n myo_final = myo_xy + myo_xz + myo_yz\r\n myo_vote = numpy.zeros((myo_final.shape))\r\n for i in range(myo_final.shape[0]):\r\n for j in range(myo_final.shape[1]):\r\n for k in range(myo_final.shape[2]):\r\n if myo_final[i,j,k] >= 2:\r\n myo_vote[i,j,k] = 1\r\n \r\n \r\n dsc = []\r\n acc = []\r\n prec = []\r\n rec = []\r\n \r\n \r\n myo_clean = numpy.zeros((myo_vote.shape))\r\n gt_clean = numpy.zeros((scar.shape))\r\n for page in range(myo_vote.shape[2]):\r\n myo_vote_slc = myo_vote[:,:,page]\r\n myo_slc = scar[:,:,page]\r\n seg_clean = numpy.array(myo_vote_slc, bool)\r\n seg_clean = morphology.remove_small_objects(seg_clean,100) \r\n seg_clean = seg_clean*1 \r\n \r\n seg_clean = scipy.ndimage.morphology.binary_dilation(seg_clean, iterations=1) \r\n seg_clean = seg_clean*1 \r\n myo_clean[:,:,page] = seg_clean\r\n \r\n myo_slc = scipy.ndimage.morphology.binary_dilation(myo_slc, iterations=3) \r\n myo_slc = myo_slc*1 \r\n gt_clean[:,:,page] = myo_slc\r\n \r\n y_true = numpy.reshape(myo_slc, (x*y,1))\r\n y_pred = numpy.reshape(seg_clean, (x*y,1)) \r\n dsc = numpy.append(dsc,f1_score(y_true, y_pred, average='macro')*100 )\r\n acc = numpy.append(acc,accuracy_score(y_true, y_pred)*100)\r\n prec = numpy.append(prec, precision_score(y_true, y_pred, average='macro')*100)\r\n rec = numpy.append(rec, recall_score(y_true, y_pred, average='macro')*100)\r\n \r\n \r\n \r\n dsc_total = numpy.append(dsc_total,numpy.mean(dsc))\r\n acc_total = numpy.append(acc_total,numpy.mean(acc))\r\n prec_total = numpy.append(prec_total,numpy.mean(prec))\r\n rec_total = numpy.append(rec_total,numpy.mean(rec)) \r\n vol_manual = numpy.append(vol_manual,numpy.sum(gt_clean)*1.3*0.625*0.625/1000)\r\n vol_seg = numpy.append(vol_seg,numpy.sum(myo_clean)*1.3*0.625*0.625/1000) \r\n sec = numpy.append(sec,(time.time() - start_time)) \r\n slice_no = slice_no + myo_clean.shape[0] + myo_clean.shape[1] + myo_clean.shape[2]\r\n\r\n \r\n \r\n#% \r\nprint('Mean Values:') \r\nprint('DI is :', round(numpy.mean(dsc_total),2) , '+', round(numpy.std(dsc_total),2))\r\nprint('Acc. 
is :', round(numpy.mean(acc_total),2), '+', round(numpy.std(acc_total),2))\r\nprint('Precision is :', round(numpy.mean(prec_total),2), '+', round(numpy.std(prec_total),2))\r\nprint('Recall is :', round(numpy.mean(rec_total),2), '+', round(numpy.std(rec_total),2))\r\n\r\nprint('Median Values:') \r\nprint('DI is :', round(numpy.median(dsc_total),2) , '+', round(numpy.std(dsc_total),2))\r\nprint('Acc. is :', round(numpy.median(acc_total),2), '+', round(numpy.std(acc_total),2))\r\nprint('Precision is :', round(numpy.median(prec_total),2), '+', round(numpy.std(prec_total),2))\r\nprint('Recall is :', round(numpy.median(rec_total),2), '+', round(numpy.std(rec_total),2))\r\n \r\n" }, { "alpha_fraction": 0.7962732911109924, "alphanum_fraction": 0.8024844527244568, "avg_line_length": 132.8333282470703, "blob_id": "94d430c95c4fffb9ae32a03b07b50398d703360b", "content_id": "151080073040fc0f7075272d1fdf1f308927956c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 805, "license_type": "no_license", "max_line_length": 142, "num_lines": 6, "path": "/README.md", "repo_name": "Fatemeh-Zabihollahy/3DLGE_Scar_XYZ", "src_encoding": "UTF-8", "text": "\n\nThese 8 scripts are written to segment left ventricle (LV) myocardial scar from 3D LGE CMRI. First, the myocardium must be delineated from\nthe images. To this end, LGE_Myo_XY, LGE_Myo_XZ, and LGE_Myo_YZ are used to train three U-Nets using 2D slices extracted from three orthogonal\ndirections including transversal, sagittal, and coronal. Then the 3DLGE_Myo_XYZ is employed to combine the trained models for LV myocardial\nsegmentation. The binary segmentation maps created for LV myocardium must be saved to be used in the next step for scar segmentation. \nSimilarly, three networks are trained by implementing LGE_Scar_XY, LGE_Scar_XZ, and LGE_Scar_YZ to identify the boundaries of scar \nin LV myocardium. 3DLGE_Scar_XYZ combines the prediction results to generate the segmentation map of the LV scar.\n" } ]
2
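The README above describes combining predictions from the three orthogonal planes; in LGE_Myo_Scar_XYZ.py this is the triple-nested loop that keeps voxels where myo_final >= 2. A vectorized equivalent of that 2-of-3 majority vote, assuming three binary numpy volumes of equal shape:

import numpy as np

def majority_vote(pred_xy, pred_xz, pred_yz):
    # a voxel is kept when at least two of the three plane-wise models agree
    return ((pred_xy + pred_xz + pred_yz) >= 2).astype(np.uint8)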
jaustinpage/notification-light
https://github.com/jaustinpage/notification-light
59f5fd153868c8157fcbd138631a19506de120a5
603a7c6fb7d7e84cf8466bd4bcb90a1b717aa524
756fddd00a80549263dc17675fac4cac36b56482
refs/heads/master
2020-04-09T11:41:07.615763
2016-04-25T15:49:27
2016-04-25T15:49:27
50,853,197
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.702194333076477, "alphanum_fraction": 0.7241379022598267, "avg_line_length": 14.190476417541504, "blob_id": "81639aaf3d527780bdb8f5f76f5eb534962f5644", "content_id": "48286eccc5d4242d28841b36626300e9de2d079f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 319, "license_type": "permissive", "max_line_length": 43, "num_lines": 21, "path": "/Makefile", "repo_name": "jaustinpage/notification-light", "src_encoding": "UTF-8", "text": "deps:\n\tvirtualenv env\n\tsudo pip install -r requirements.txt\n\tgit clone https://github.com/todbot/blink1\n\tcd blink1\n\tgit checkout v1.98\n\tcd commandline\n\tmake\n\t\n\techo \"Run source env/bin/activate\"\n\nclean:\n\tpyclean .\n\nlint:\n\tpep8 --show-source --show-pep8 ./*.py\n\nfreeze:\n\t\tpip freeze > requirements.txt\n\ninit: deps clean\n" }, { "alpha_fraction": 0.4791666567325592, "alphanum_fraction": 0.6875, "avg_line_length": 15, "blob_id": "323de8362eda66260bdcd55041147dae6299d932", "content_id": "f45f9ee10f9d72a5806fa84f0a4fbcaa795da984", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 96, "license_type": "permissive", "max_line_length": 20, "num_lines": 6, "path": "/requirements.txt", "repo_name": "jaustinpage/notification-light", "src_encoding": "UTF-8", "text": "docutils==0.12\ninotify==0.2.4\nlockfile==0.12.2\npsutil==3.4.2\npython-daemon==2.1.1\nwheel==0.24.0\n" }, { "alpha_fraction": 0.7931034564971924, "alphanum_fraction": 0.8045976758003235, "avg_line_length": 42.5, "blob_id": "0986c9acca9868a3d49c232fdce7da91a05b5291", "content_id": "4f95fec9f7bc8b913ff8b6ff77fa086553c3d176", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 87, "license_type": "permissive", "max_line_length": 65, "num_lines": 2, "path": "/README.md", "repo_name": "jaustinpage/notification-light", "src_encoding": "UTF-8", "text": "# notification-light\nTurns blink1 red when camera is on, other colors for other events\n" }, { "alpha_fraction": 0.585457980632782, "alphanum_fraction": 0.5886055827140808, "avg_line_length": 29.84465980529785, "blob_id": "c63f6088861e176f6c8dc83b405adebfeb97fc93", "content_id": "53d8433bf4164c44f838eb21c43e764b8c0a5414", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3177, "license_type": "permissive", "max_line_length": 101, "num_lines": 103, "path": "/notification-light.py", "repo_name": "jaustinpage/notification-light", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport daemon\nfrom fnmatch import fnmatch\nimport inotify.adapters\nimport logging\nimport os\nimport subprocess\nimport time\nimport traceback\nimport sys\n\n\n_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n_LOGGER = logging.getLogger(__name__)\n\ndef _configure_logging(log_level=logging.WARNING):\n '''\n Set up some common logging stuff. 
Source this from module functions or class functions\n    '''\n    _LOGGER.setLevel(log_level)\n    \n    ch = logging.StreamHandler()\n    formatter = logging.Formatter(_LOG_FORMAT)\n    ch.setFormatter(formatter)\n    \n    _LOGGER.addHandler(ch)\n    _LOGGER.info(\"Logger configured\")\n\n\ndef light_red():\n    try:\n        subprocess.check_output(['/home/jaustinpage/github/blink1/commandline/blink1-tool', '--red'])\n    except subprocess.CalledProcessError:\n        stacktrace = traceback.format_exc()\n        _LOGGER.debug('Exception in light_red() calling blink1-tool. Stacktrace:\\n%s' % stacktrace)\n\n\ndef light_off():\n    try:\n        subprocess.check_output(['/home/jaustinpage/github/blink1/commandline/blink1-tool', '--off'])\n    except subprocess.CalledProcessError:\n        stacktrace = traceback.format_exc()\n        _LOGGER.debug('Exception in light_off() calling blink1-tool. Stacktrace:\\n%s' % stacktrace)\n\n\nclass CameraMonitor(object):\n    '''\n    This is a class that monitors a camera\n    '''\n    def __init__(self, camera_path):\n        super(CameraMonitor, self).__init__()\n        self.camera_path = camera_path\n\n    def watch(self):\n        '''\n        This watches a camera, and reacts when it is changed\n        '''\n        self.i = inotify.adapters.Inotify()\n        self.i.add_watch(self.camera_path)\n        try:\n            for event in self.i.event_gen():\n                if event is not None:\n                    (header, type_names, watch_path, filename) = event\n                    _LOGGER.debug(\"WD=(%d) MASK=(%d) COOKIE=(%d) LEN=(%d) MASK->NAMES=%s \"\n                                  \"WATCH-PATH=[%s] FILENAME=[%s]\",\n                                  header.wd, header.mask, header.cookie, header.len, type_names,\n                                  watch_path, filename)\n                    if header.mask == 32:\n                        light_red()\n                    if header.mask == 8:\n                        light_off()\n        finally:\n            self.i.remove_watch(self.camera_path)\n\n\n#def find_cameras():\n#    '''\n#    Gets all of the devices that include the word 'video' in it\n#    '''\n#    for f in os.listdir('/dev'):\n#        if fnmatch(f, 'video*'):\n#            video_camera_path = os.path.join('/dev', f)\n#            logging.debug('Found video camera %s' % video_camera_path)\n#            yield video_camera_path\n\n\ndef _main():\n    '''\n    This is the main function yo. Sets up and handles the loop\n    '''\n    _configure_logging(logging.DEBUG)\n    _LOGGER.debug('Notification light was started')\n    camera_path = '/dev/video1'\n    camera = CameraMonitor(camera_path)\n    camera.watch()\n\nif __name__ == '__main__':\n    '''\n    The default thing to do.\n    '''\n    with daemon.DaemonContext():\n        _main()\n" } ]
4
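The bare numbers tested in watch() above are raw inotify mask bits: 32 is IN_OPEN (the camera device was opened) and 8 is IN_CLOSE_WRITE (a writable handle on it was closed). A small sketch of the same dispatch with named constants, reusing light_red() and light_off() from notification-light.py above:

IN_CLOSE_WRITE = 0x00000008   # writable handle closed -> camera released
IN_OPEN = 0x00000020          # device opened -> camera in use

def react(mask):
    if mask & IN_OPEN:
        light_red()
    if mask & IN_CLOSE_WRITE:
        light_off()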
jlchapman/Weather
https://github.com/jlchapman/Weather
50abe493e72628244cadb50d9bc450b2eb227111
3e263581a0e1bded1fa5417d8f623f37068fe98e
eb3d956d700c52fd160dae8adef15c1ea677c065
refs/heads/master
2020-12-30T10:36:34.252720
2014-09-01T15:59:24
2014-09-01T15:59:24
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4983796179294586, "alphanum_fraction": 0.5282407402992249, "avg_line_length": 29.624113082885742, "blob_id": "700e1e762ea27a363f8b7a2e51315ddc2c8bd77c", "content_id": "c33a39b1755726a2656c8207c260f081d655bbdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4320, "license_type": "no_license", "max_line_length": 163, "num_lines": 141, "path": "/WeatherCreateDB.py", "repo_name": "jlchapman/Weather", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport MySQLdb\nimport time\n#import Adafruit_BMP.BMP085 as BMP085\n\n\n\n#open the MySQL database\ndb = MySQLdb.connect(\"localhost\", \"sensors\", \"raptor50\", \"Weather\")\ncurs=db.cursor()\n\n#CREATE THE PRESSURE TABLE FOR PRESSURE AND TEMP FROM BMP085\n\n#print (time.strftime(\"%m/%d/%Y\"))\nvar0 = time.strftime(\"%Y/%m/%d:%H:%M:%S\")\nvar1 = time.strftime(\"%Y/%m/%d\")\nvar2 = time.strftime(\"%H:%M:%S\")\nvar3 = 25.5\nvar4 = 3280.84\nvar5 = 9999.01\nvar6 = 8888.01\n\nwith db:\n curs.execute(\"DROP TABLE IF EXISTS Pressure\")\n curs.execute(\"CREATE TABLE Pressure(Id INT PRIMARY KEY AUTO_INCREMENT,\\\n MeasTime DATETIME, DateColumn DATE,TimeColumn TIME,Temp DOUBLE(5,2), Altitude DOUBLE(8,2), Pressure DOUBLE(8,2), SeaLevelPress DOUBLE(8,2) ) \")\n\n curs.execute (\"\"\"\n INSERT INTO Pressure (MeasTime,DateColumn,TimeColumn,Temp,Altitude,Pressure,SeaLevelPress)\n VALUES\n (%s, %s, %s, %s, %s, %s, %s)\n \"\"\",(var0,var1,var2,var3,var4,var5,var6) )\n\ncurs.execute (\"SELECT * FROM Pressure\")\n\n\n\nprint \"\\n\\n\\nId MeasTime Date Time Temperature Altitude Pressure SeaLevelPressure\"\nprint \"=========================================================================================================================\"\n\nfor reading in curs.fetchall():\n print str(reading[0])+\" \"+str(reading[1])+\" \"+str(reading[2])+\" \"+str(reading[3])+\" \"+str(reading[4])+\" \"+str(reading[5])\\\n +\" \"+str(reading[6])+\" \"+str(reading[7])\n\n\n#CREATE THE TEMPTMP35 TABLE FOR TEMP\n\nvar3 = 25.5\n\nwith db:\n curs.execute(\"DROP TABLE IF EXISTS TempTMP35\")\n curs.execute(\"CREATE TABLE TempTMP35(Id INT PRIMARY KEY AUTO_INCREMENT,\\\n MeasTime DATETIME, DateColumn DATE,TimeColumn TIME, Temperature DOUBLE(5,2) ) \")\n\n curs.execute (\"\"\"\n INSERT INTO TempTMP35 (MeasTime,DateColumn,TimeColumn,Temperature)\n VALUES\n (%s, %s, %s, %s)\n \"\"\",(var0,var1,var2,var3) )\n\ncurs.execute (\"SELECT * FROM TempTMP35\")\n\n\n\nprint \"\\n\\n\\nId MeasTime Date Time Temperature \"\nprint \"============================================================================\"\n\nfor reading in curs.fetchall():\n print str(reading[0])+\" \"+str(reading[1])+\" \"+str(reading[2])+\" \"+str(reading[3])+\" \"+str(reading[4])\n\n\n\n\n\n\n#CREATE THE HUMIDITY SENSOR TABLE FOR HUMIDITY\n\nvar3 = 24.5\nvar4 = 36.80\nvar5 = 23.8\n\nwith db:\n curs.execute(\"DROP TABLE IF EXISTS Humidity\")\n curs.execute(\"CREATE TABLE Humidity(Id INT PRIMARY KEY AUTO_INCREMENT,\\\n MeasTime DATETIME, DateColumn DATE,TimeColumn TIME, HTemperature DOUBLE(5,2), Humidity DOUBLE(6,2) ) \")\n\n curs.execute (\"\"\"\n INSERT INTO Humidity (MeasTime,DateColumn,TimeColumn,HTemperature,Humidity)\n VALUES\n (%s, %s, %s, %s, %s)\n \"\"\",(var0,var1,var2,var3,var4) )\n\ncurs.execute (\"SELECT * FROM Humidity\")\n\n\n\nprint \"\\n\\n\\nId MeasTime Date Time Humidity Temp Humidity \"\nprint \"============================================================================================\"\n\nfor reading in 
curs.fetchall():\n    print str(reading[0])+\"  \"+str(reading[1])+\"  \"+str(reading[2])+\"  \"+str(reading[3])+\"  \"+str(reading[4])+\"  \"+str(reading[5])\n\n\n\n\n#CREATE THE RAIN GAUGE TABLE FOR RAIN\n\nvar3 = .01\n\nwith db:\n    curs.execute(\"DROP TABLE IF EXISTS Rain\")\n    curs.execute(\"CREATE TABLE Rain(Id INT PRIMARY KEY AUTO_INCREMENT,\\\n    MeasTime DATETIME, DateColumn DATE,TimeColumn TIME, RainSample DOUBLE(5,2) ) \")\n\n    curs.execute (\"\"\"\n        INSERT INTO Rain (MeasTime,DateColumn,TimeColumn,RainSample)\n        VALUES\n        (%s, %s, %s, %s)\n        \"\"\",(var0,var1,var2,var3) )\n\ncurs.execute (\"SELECT * FROM Rain\")\n\n\n\nprint \"\\n\\n\\nId    MeasTime                Date          Time       Rain  \"\nprint \"=====================================================================\"\n\nfor reading in curs.fetchall():\n    print str(reading[0])+\"  \"+str(reading[1])+\"  \"+str(reading[2])+\"  \"+str(reading[3])+\"  \"+str(reading[4])\n\n\n\n\n\n\n\n\n\n\ndb.close()\n\n\n" } ]
1
angiemapa/RNACebolla
https://github.com/angiemapa/RNACebolla
f6bc376bd2a0ec89f061bad5154538c0a1687ea2
b6f56b5cb2266f6084251d8130548d33ba9b0a8f
9938329f0add3dd583d787053d339ee598f61566
refs/heads/master
2022-04-24T09:57:14.146314
2020-04-28T04:23:57
2020-04-28T04:23:57
259,413,633
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7035788297653198, "alphanum_fraction": 0.7318464517593384, "avg_line_length": 37.18811798095703, "blob_id": "87adfdc640f7ee3125c0fd18407dd4bfa86e2281", "content_id": "4a75a58eff36c74c4c5206261201c624999c110f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3857, "license_type": "no_license", "max_line_length": 88, "num_lines": 101, "path": "/RNACebolla.py", "repo_name": "angiemapa/RNACebolla", "src_encoding": "UTF-8", "text": "import numpy as np\nimport os\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.applications.mobilenet import preprocess_input\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dropout, Flatten, Dense, Activation\nfrom tensorflow.keras.layers import Convolution2D, MaxPooling2D, Conv2D\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras import backend as k\n#from mlxtend.evaluate import confusion_matrix\nimport matplotlib.image as mping\nimport matplotlib.pyplot as plt\nfrom tensorflow.python.keras.optimizers import Adam \n\nk.clear_session()\n#colocamos la ruta donde esta la carpeta de las imagenes\npat = (r\".\\prueba\")\ndatos_entrenamiento = (r\".\\Validacion\\faseA\")\ndatos_validacion = (r\".\\Validacion\\faseC\")\n\nepocas = 50\nlongitud, altura = 150,150#redimencionar el tamaño de la imagen\nbatch_size = 10 #cantidad de imagenes que procesa a la vez\nfiltrosConv1 = 32 #numero de filtros que aplicamos tas la primera capa\nfiltrosConv2 = 64 #numero de filtros que aplicamos tras la segunda capa2\nsize_filtro1 = (3,3)#para primera convolucion\nsize_filtro2 = (2,2)\nsize_pool = (2,2)#para mejorar el vance de la convolucio\nnetapas = 2 #debemos colocar todas las etapcas que vamos a evaluar de nuestro planta \nlr = 0.0004 #tendremo que ir probando con el error para ver cual esta el mejor resultado\n\n#Restructurando nuestos datos de imagenes\nentrenamiento_restructurada = ImageDataGenerator(\n rescale = 1./255, #rescalamos los pixeles de la imagen entre 0-1\n shear_range = 0.3, #inclinar imagenes\n zoom_range = 0.3, #Porciones de imagenes\n horizontal_flip = True)\n\nvalidacion_restructurada = ImageDataGenerator(\n rescale = 1./255)\n\n#abrir y alistar todo la carpeta de entrenamiento\nimagen_entrenamiento = entrenamiento_restructurada.flow_from_directory(\n datos_entrenamiento,\n target_size= (altura,longitud),\n batch_size = batch_size,\n class_mode = 'categorical')\n\nimagen_validacion = validacion_restructurada.flow_from_directory(\n datos_validacion,\n target_size= (altura,longitud),\n batch_size = batch_size,\n class_mode = 'categorical')\nprint(imagen_entrenamiento.class_indices)\n\npasos_entrenamiento = imagen_entrenamiento.n//imagen_entrenamiento.batch_size\npasos_validacion = imagen_validacion.n//imagen_validacion.batch_size\nprint(pasos_entrenamiento,pasos_validacion)\n\n#Creamos la red neuronal convolucional\n\ncnn = Sequential()\ncnn.add(Convolution2D(64, #cambie 64\n kernel_size=(3, 3), \n padding ='same',\n input_shape=(longitud,altura,3),\n activation='relu'))\ncnn.add(MaxPooling2D(pool_size=(2, 2)))\n\ncnn.add(Convolution2D(128, kernel_size=(3, 3), activation='relu')) #128\ncnn.add(MaxPooling2D(pool_size=(2, 2)))\ncnn.add(Convolution2D(256, kernel_size=(3, 3), activation='relu')) #256\ncnn.add(MaxPooling2D(pool_size=(2, 2)))\ncnn.add(Convolution2D(512, kernel_size=(3, 3), activation='relu'))\ncnn.add(MaxPooling2D(pool_size=(2, 2)))\ncnn.add(Convolution2D(1024, 
\n#Build the convolutional neural network\n\ncnn = Sequential()\ncnn.add(Convolution2D(64, #changed to 64\n                 kernel_size=(3, 3), \n                 padding ='same',\n                 input_shape=(longitud,altura,3),\n                 activation='relu'))\ncnn.add(MaxPooling2D(pool_size=(2, 2)))\n\ncnn.add(Convolution2D(128, kernel_size=(3, 3), activation='relu')) #128\ncnn.add(MaxPooling2D(pool_size=(2, 2)))\ncnn.add(Convolution2D(256, kernel_size=(3, 3), activation='relu')) #256\ncnn.add(MaxPooling2D(pool_size=(2, 2)))\ncnn.add(Convolution2D(512, kernel_size=(3, 3), activation='relu'))\ncnn.add(MaxPooling2D(pool_size=(2, 2)))\ncnn.add(Convolution2D(1024, kernel_size=(3, 3), activation='relu'))\ncnn.add(MaxPooling2D(pool_size=(2, 2)))\n\ncnn.add(Flatten())\ncnn.add(Dense(256, activation='relu'))\ncnn.add(Dropout(0.5))\ncnn.add(Dense(netapas, activation='softmax')) \n\ncnn.compile(loss='categorical_crossentropy',\n            optimizer='sgd', \n            metrics=['accuracy'])\n\nH = cnn.fit_generator(imagen_entrenamiento,\n                    steps_per_epoch=pasos_entrenamiento,\n                    epochs=epocas,\n                    validation_data=imagen_validacion,\n                    validation_steps=pasos_validacion)\n\n\n#folder path where the model is saved\ntarget_dir = r'.\\modelo'\n#create it if it does not exist\nif not os.path.exists(target_dir):\n    os.mkdir(target_dir)\ncnn.save(r'.\\modelo\\modelo.h5') #model file name\ncnn.save_weights(r'.\\modelo\\pesos.h5') #weights file name" }, { "alpha_fraction": 0.638115644454956, "alphanum_fraction": 0.6713061928749084, "avg_line_length": 26.363636016845703, "blob_id": "2903286c3b97aed17cda9276fa926ba75cee72c3", "content_id": "9f3c980a5ffd840701d0636d03fb92e8b94f876a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 934, "license_type": "no_license", "max_line_length": 76, "num_lines": 33, "path": "/predict.py", "repo_name": "angiemapa/RNACebolla", "src_encoding": "UTF-8", "text": "import numpy as np\r\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\r\nfrom tensorflow.keras.models import load_model\r\n\r\n\r\nlongitud, altura = 150,150\r\nmodelo = r'C:\\Users\\PAOLITA\\Documents\\Pao\\URL\\2020\\IA\\modelo\\modelo.h5'\r\npesos_modelo = r'C:\\Users\\PAOLITA\\Documents\\Pao\\URL\\2020\\IA\\modelo\\pesos.h5'\r\ncnn = load_model(modelo)\r\ncnn.load_weights(pesos_modelo)\r\n\r\ndef predict(file,answer):\r\n  x = load_img(file, target_size=(longitud, altura))\r\n  x = img_to_array(x)\r\n  x = np.expand_dims(x, axis=0)\r\n  array = cnn.predict(x)\r\n  result = array[0]\r\n  #the prediction as a class index scaled by 10\r\n  answer = np.argmax(result)*10\r\n  \r\n  \r\n  if answer <20:\r\n    print(\"pred: onion in phase 1\")\r\n  elif answer <30:\r\n    print(\"pred: onion in phase 2\")\r\n  elif answer <40:\r\n    print(\"pred: onion in phase 3\")\r\n  else:\r\n    print(\"not an onion\")\r\n\r\n  return answer\r\n\r\nnum = 31 #number of the image\r\npredict(\"C:\\\\Users\\\\PAOLITA\\\\Pictures\\\\prueba\\\\\"+str(num)+\".jpg\",num)\r\ncnn.summary()" } ]
2
a74nh/tastybrianz
https://github.com/a74nh/tastybrianz
2fb59321275f082984c91db679bd56664f666959
1a781050246c52df1a95d21706d634490df20d38
55824ffd4c2612fe75b468be304a4886008aa611
refs/heads/main
2023-02-08T02:17:40.623353
2021-01-01T13:03:52
2021-01-01T13:04:52
325,973,028
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6578784584999084, "alphanum_fraction": 0.6644695997238159, "avg_line_length": 28.785276412963867, "blob_id": "271938b698b3dc3016f92e5d47af544fd87e782b", "content_id": "0be7cbbc2601b18404659510112f04f23f54ecdc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4855, "license_type": "permissive", "max_line_length": 116, "num_lines": 163, "path": "/brainz_series.py", "repo_name": "a74nh/tastybrianz", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport yaml\nfrom dataclasses import dataclass, field\nfrom typing import List, Dict\nimport json\nfrom tabulate import tabulate\nimport argparse\nfrom pathlib import Path\nimport musicbrainzngs\n\nid_file=\"brainz.yaml\"\ncache_dir=\".cache\"\n\n@dataclass\nclass Series:\n\tseries_id: str = \"\"\n\tseries_key: str = \"\"\n\tdata: Dict[str, str] = field(default_factory = lambda: ({}))\n\ttable: List[List[str]] = field(default_factory = lambda: ([]))\n\theaders: List[str] = field(default_factory = lambda: ([]))\n\n\tdef __post_init__(self):\n\t\tmusicbrainzngs.set_useragent(\"tastybrainz\", \"0.1\", \"https://github.com/a74nh/\")\n\t\tself.__get_series_data()\n\n\tdef __get_series_data(self):\n\t\tPath(cache_dir).mkdir(parents=True, exist_ok=True)\n\t\tcache_file=f'{cache_dir}/{self.series_id}.json'\n\t\ttry:\n\t\t\twith open(cache_file) as f:\n\t\t\t\tself.data = json.load(f)\n\t\t\tself.relations = self.data[\"release_group-relation-list\"]\n\t\texcept FileNotFoundError:\n\t\t\tprint('Reading series from musicbrainz:',self.series_id)\n\t\t\tself.data = musicbrainzngs.get_series_by_id(self.series_key, includes=[\"release-group-rels\"])[\"series\"]\n\t\t\tself.relations = self.data[\"release_group-relation-list\"]\n\t\t\tself.__get_artist_credits()\n\t\t\twith open(cache_file, 'w') as f:\n\t\t\t\tjson.dump(self.data, f)\n\n\tdef __get_artist_credits(self):\n\t\tdatarel=self.relations\n\t\tfor rel in datarel:\n\t\t\ti=rel[\"release-group\"][\"id\"]\n\t\t\tcache_file=f'{cache_dir}/release-group-{i}.json'\n\t\t\ttry:\n\t\t\t\twith open(cache_file) as f:\n\t\t\t\t\tcredit = json.load(f)\n\t\t\texcept FileNotFoundError:\n\t\t\t\tprint('Reading artists from musicbrainz:',rel[\"release-group\"][\"title\"])\n\t\t\t\tcredit=musicbrainzngs.get_release_group_by_id(i, includes=[\"artist-credits\"])\n\t\t\t\twith open(cache_file, 'w') as f:\n\t\t\t\t\tjson.dump(credit, f)\n\t\t\trel[\"release-group\"][\"artist-credit\"] = credit[\"release-group\"][\"artist-credit\"]\n\t\t\trel[\"release-group\"][\"artist-credit-phrase\"] = credit[\"release-group\"][\"artist-credit-phrase\"]\n\n\tdef __repr__(self):\n\t\treturn f'{self.data[\"name\"]}'\n\n\tdef __generate_row_headers(self,other_lists):\n\t\tself.headers=[\"num\", \"title\", \"artist\", \"year\"]\n\t\tfor other in other_lists:\n\t\t\tself.headers.append(other.series_id)\n\n\tdef generate_table(self,other_lists,max_len):\n\t\tself.__generate_row_headers(other_lists)\n\t\tret=[]\n\t\tself.table = [None]*len(self.relations)\n\t\tself.counts = [0]*len(other_lists)\n\n\t\tfor rel in self.relations:\n\t\t\tkey=int(rel['ordering-key'])\n\t\t\tg=rel[\"release-group\"]\n\t\t\ttitle=g['title'][0:max_len]\n\t\t\tartist=g[\"artist-credit-phrase\"][0:max_len]\n\t\t\tyr=g['first-release-date'].split(\"-\")[0]\n\t\t\trow = [key,title,artist,yr]\n\n\t\t\tc=0\n\t\t\tfor other in 
other_lists:\n\t\t\t\tother_rel=other.__find_relation(g[\"id\"])\n\t\t\t\ttry:\n\t\t\t\t\tother_key=int(other_rel['ordering-key'])\n\t\t\t\t\tdiff=other_key-key\n\t\t\t\t\tif diff==0:\n\t\t\t\t\t\tdiff=\"-\"\n\t\t\t\t\telif diff<0:\n\t\t\t\t\t\tdiff=\"v\"+str(-diff)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdiff=\"^\"+str(diff)\n\t\t\t\t\tself.counts[c]=self.counts[c]+1\n\t\t\t\texcept TypeError:\n\t\t\t\t\tdiff=\"x\"\n\t\t\t\trow.append(diff)\n\t\t\t\tc=c+1\n\n\t\t\tself.table[key-1] = row\n\n\tdef __find_relation(self,fid):\n\t\tfor r in self.relations:\n\t\t\tif r[\"release-group\"][\"id\"]==fid:\n\t\t\t\treturn r\n\t\treturn None\n\n\tdef sort_by_column(self,col):\n\t\tcolval=self.headers.index(col)\n\t\tif colval<4:\n\t\t\tself.table.sort(key=lambda x: x[colval])\n\t\telse:\n\t\t\tdef difftonum(x):\n\t\t\t\tif x==\"x\":\n\t\t\t\t\treturn 9999999\n\t\t\t\telif x==\"-\":\n\t\t\t\t\treturn 0\n\t\t\t\telif x[0]==\"^\":\n\t\t\t\t\treturn int(x[1:])\n\t\t\t\telif x[0]==\"v\":\n\t\t\t\t\treturn -int(x[1:])\n\t\t\t\traise ValueError\n\t\t\tself.table.sort(key=lambda x: difftonum(x[colval]))\n\n\n\tdef tabulate(self,tablefmt):\n\t\tt=self.table\n\t\tif self.counts:\n\t\t\tt=t+[[\"totals\",\"\",\"\",\"\"]+self.counts]\n\t\treturn f'{self.data[\"name\"]}\\n{tabulate(t,tablefmt=tablefmt,headers=self.headers)}'\n\ndef load_config():\n\twith open(id_file, 'r') as f:\n\t\treturn yaml.safe_load(f)\n\ndef generate_table(series_id, compare_ids=[], truncate=None, id_config=None):\n\tif not id_config:\n\t\tid_config = load_config()\n\tbase_list=Series(series_id,id_config[series_id])\n\tother_list=[Series(i,id_config[i]) for i in compare_ids]\n\tbase_list.generate_table(other_list,truncate)\n\treturn base_list\n\ndef main():\n\tid_config=load_config()\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"-s\", \"--sort\", help=\"sort by column\", type=str)\n\tparser.add_argument(\"--style\", help=\"tabulate style\", type=str, default=\"orgtbl\")\n\tparser.add_argument(\"-t\", \"--truncate\", help=\"truncate entries\", type=int, default=None)\n\tparser.add_argument('id', nargs=\"?\", help=\"id of series to show\", choices=[x for x in id_config], default=\"rs2020\")\n\tparser.add_argument('compare_ids', nargs='*', help=\"ids of series to compare against\")\n\targs = parser.parse_args()\n\n\tbase_list = generate_table(args.id,args.compare_ids,args.truncate,id_config)\n\tif args.sort:\n\t\tbase_list.sort_by_column(args.sort)\n\n\ttry:\n\t\tprint(base_list.tabulate(args.style))\n\texcept (BrokenPipeError, IOError):\n\t\tpass\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.4006265103816986, "alphanum_fraction": 0.4563174247741699, "avg_line_length": 37.30666732788086, "blob_id": "82fa346c42892d98d90ffb9ea6c2fc8ba529193f", "content_id": "48478b67d87929d5e89f098e614964f7754f5376", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2877, "license_type": "permissive", "max_line_length": 125, "num_lines": 75, "path": "/README.md", "repo_name": "a74nh/tastybrianz", "src_encoding": "UTF-8", "text": "# tastybrianz\nplaying around with musicbrainz\n\n\n# Running as script\n\nbrainz_series.py\n* Get a series from musicbrainz.\n* Downloaded data is saved to a cache directory.\n* Optionally add additional lists as difference columns.\n* Optionally sort the data.\n* Format data as a tabulate table and print.\n\n```\n usage: brainz_series.py [-h] [-s SORT] [--style STYLE] [-t TRUNCATE] [{rs2012,rs2020,guardian100,jaguaro}] [compare_ids 
...]\n\npositional arguments:\n {rs2012,rs2020,guardian100,jaguaro}\n id of series to show\n compare_ids ids of series to compare against\n\noptional arguments:\n -h, --help show this help message and exit\n -s SORT, --sort SORT sort by column\n --style STYLE tabulate style\n -t TRUNCATE, --truncate TRUNCATE\n truncate entries\n```\n\n## Example\n```\nbrainz_series.py -t 30 rs2020 rs2012\n```\n\n```\nRolling Stone: 500 Greatest Albums of All Time: 2020 edition\n| num | title | artist | year | rs2012 |\n|--------+--------------------------------+--------------------------------+--------+----------|\n| 1 | What’s Going On | Marvin Gaye | 1971 | ^5 |\n| 2 | Pet Sounds | The Beach Boys | 1966 | - |\n| 3 | Blue | Joni Mitchell | 1971 | ^27 |\n| 4 | Songs in the Key of Life | Stevie Wonder | 1976 | ^53 |\n| 5 | Abbey Road | The Beatles | 1969 | ^9 |\n| 6 | Nevermind | Nirvana | 1991 | ^11 |\n| 7 | Rumours | Fleetwood Mac | 1977 | ^19 |\n| 8 | Purple Rain | Prince and The Revolution | 1984 | ^68 |\netc...\n```\n\n```\nbrainz_series.py -t 30 --sort year --style plain guardian100\n```\n\n```\nThe Guardian 100 Best Albums Ever\n num title artist year\n 94 Songs for Swingin’ Lovers! Frank Sinatra 1956\n 14 Kind of Blue Miles Davis 1959\n 11 Highway 61 Revisited Bob Dylan 1965\n 45 A Love Supreme John Coltrane 1965\n 2 Revolver The Beatles 1966\n 6 Pet Sounds The Beach Boys 1966\n 24 Blonde on Blonde Bob Dylan 1966\n 8 The Velvet Underground & Nico The Velvet Underground & Nico 1967\netc...\n```\n\n# Using as module\n\n```\nimport brainz_series\nt=brainz_series.generate_table(\"rs2020\",[\"rs2012\"],30)\nt.sort_by_column(\"year\")\nprint(t.tabulate(\"simple\"))\n```\n" } ]
2
AnaisG14/hello-world
https://github.com/AnaisG14/hello-world
5b21649513fed5f374f176183a8ab740e6f55525
8deae3d401cc00aea36aa56fab9ba644c361729f
8c593e5f944920caf7ebded47f547a55dd9d1165
refs/heads/master
2021-08-28T16:04:51.958914
2021-08-18T12:20:05
2021-08-18T12:20:05
82,804,674
0
0
null
2017-02-22T13:00:24
2020-12-14T20:23:34
2020-12-17T15:08:18
Python
[ { "alpha_fraction": 0.7830188870429993, "alphanum_fraction": 0.798113226890564, "avg_line_length": 65.25, "blob_id": "4a7cc93bdd2f77f82326d84c8117bdee47a63eda", "content_id": "62fc129d6d2d53fa4c194fc3fc6315fbefb9aec8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 542, "license_type": "no_license", "max_line_length": 160, "num_lines": 8, "path": "/README.md", "repo_name": "AnaisG14/hello-world", "src_encoding": "UTF-8", "text": "# Salut tout le monde/hello world \nnouveau repository pour débuter\nJe me lance depuis peu dans le code, après html5 et css3, me voilà partie pour php. première leçon, utiliser github et créer des commits.\n2ème changement pour voir\nmodif directement sur mon ordinateur et on voit ce qui se passe (modif dans la console avec vim\n\ndécembre 2020 : quelques années sans utiliser git, révisions et nouveau test. Aujourd'hui, après création de 2 sites en php, apprentissage de du langage python.\nC'est parti pour de nouvelles aventures.\n" }, { "alpha_fraction": 0.6520270109176636, "alphanum_fraction": 0.6925675868988037, "avg_line_length": 21.769229888916016, "blob_id": "a89021e5750da111b3a8bb912f418a8f859bdd5a", "content_id": "b11eb1e9f6bcbb814b56820130529e875a95bbc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 298, "license_type": "no_license", "max_line_length": 58, "num_lines": 13, "path": "/hello.py", "repo_name": "AnaisG14/hello-world", "src_encoding": "UTF-8", "text": "\"\"\" branche master\"\"\"\n\nprint(\"hello everybody\")\nprint(\"We are the 14th of December of 2020\")\n\nname = input(\"Entrez votre prénom: \")\nprint(\"Bonjour {}, voici une première modif\".format(name))\n\ncp = input(\"Entrez votre code postal: \")\nprint(cp)\n\nprint(\"Today is Wednesday, 18/08/21\")\nprint(\"Test\")\n" } ]
2
arzhuch/python-advanced
https://github.com/arzhuch/python-advanced
e0737611ec8682685d0f8e7dfc6d5486305e6a5f
ea4edbf4623351a4a23b393aac0c78a2f29b3701
cf9a6d97597785486ded38a44f8b32eaa95e252a
refs/heads/master
2022-12-28T19:41:48.711349
2020-09-19T20:06:59
2020-09-19T20:06:59
291,809,746
0
0
null
2020-08-31T19:47:01
2020-09-19T20:07:02
2020-10-04T23:03:37
Python
[ { "alpha_fraction": 0.6012324094772339, "alphanum_fraction": 0.6188380122184753, "avg_line_length": 31.457143783569336, "blob_id": "a8cbad6912e1c3c1d8200ecfdfb25c11d558589f", "content_id": "a0484e530ffd06b20adc3765f4bbe06fc6f60af6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1533, "license_type": "no_license", "max_line_length": 76, "num_lines": 35, "path": "/lesson1/bank.py", "repo_name": "arzhuch/python-advanced", "src_encoding": "UTF-8", "text": "'''\n4) Реализовать функцию bank, которая приннимает следующие\nаргументы: сумма депозита, кол-во лет, и процент. Результатом\nвыполнения должна быть сумма по истечению депозита\n'''\nfrom time import sleep\n\nuser_amount = int(input('Вас вітає ПривітБанк! Введіть суму депозиту: '))\nuser_years = int(input('На скільки років?: '))\nuser_rate = int(input('Введіть ставку у процентах (наприклад, 5, 19): '))\n\n\n# note: ставка в процентах, т.е 5, 19, а не 0.05, 0.19\ndef bank(amount, years, rate):\n if 0 not in [amount, years, rate]:\n print(f'Отже, сума депозиту: {amount}\\n')\n print(f'Кількість років {years}\\n')\n print(f'Процентна ставка: {rate}\\n')\n\n for i in range(years):\n amount += amount*(rate/100)\n\n print('Зачекайте, будь ласка... Обраховую суму...')\n print('...\\n')\n sleep(1)\n print('...\\n')\n sleep(1)\n print('...\\n')\n sleep(1)\n print('...\\n')\n print(f'Ваша фінальна сума становитиме {amount} гривень!')\n else:\n print('На жаль, ви не можете оформити депозит із такими умовами :(')\n\nbank(user_amount, user_years, user_rate)\n" }, { "alpha_fraction": 0.6595744490623474, "alphanum_fraction": 0.673252284526825, "avg_line_length": 31.875, "blob_id": "753c531298ae09f96d89d5c93eb52f326c96356a", "content_id": "73934d2d4da2a35674f2b54ff18d653e0a549814", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1598, "license_type": "no_license", "max_line_length": 82, "num_lines": 40, "path": "/lesson2/store_chain.py", "repo_name": "arzhuch/python-advanced", "src_encoding": "UTF-8", "text": "'''\n2) Создать класс магазина. Конструктор должен инициализировать\nзначения: «Название магазина» и «Количество проданных\nтоваров». 
" }, { "alpha_fraction": 0.6595744490623474, "alphanum_fraction": 0.673252284526825, "avg_line_length": 31.875, "blob_id": "753c531298ae09f96d89d5c93eb52f326c96356a", "content_id": "73934d2d4da2a53674f2b54ff18d653e0a549814", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1598, "license_type": "no_license", "max_line_length": 82, "num_lines": 40, "path": "/lesson2/store_chain.py", "repo_name": "arzhuch/python-advanced", "src_encoding": "UTF-8", "text": "'''\n2) Create a store class. The constructor should initialise the\nvalues 'store name' and 'number of items sold'.\nImplement instance methods that increase the number of items\nsold, and implement printing of the class variable that keeps\nthe total number of items sold by all the stores.\n'''\n\n\nclass Store:\n    chain_sales = 0\n\n    def __init__(self, store_name, sales_count=0):\n        self.store_name = store_name\n        self.sales_count = sales_count\n\n        Store.chain_sales += sales_count\n\n    def retail_sale(self, units_count):\n        increment = units_count * 10\n        self.sales_count += increment\n        Store.chain_sales += increment\n        print(f'{units_count} units 10 items each sold at {self.store_name}')\n\n    def wholesale(self, pallets_count):\n        increment = pallets_count * 100\n        self.sales_count += increment\n        Store.chain_sales += increment\n        print(f'{pallets_count} pallets 100 items each sold at {self.store_name}')\n\n\nif __name__ == '__main__':\n    silpo = Store('Silpo', 0)\n    silpo.retail_sale(10)\n    print(f'Silpo local sales: {silpo.sales_count}')\n    auchan = Store('Auchan', 0)\n    auchan.wholesale(5)\n    print(f'Auchan local sales: {auchan.sales_count}')\n    print(f'Global sales: {Store.chain_sales}')\n\n" }, { "alpha_fraction": 0.6678280830383301, "alphanum_fraction": 0.6689895391464233, "avg_line_length": 25.90625, "blob_id": "1bc3ed615c7153f029170d837bff9b15f0ac6594", "content_id": "2b8ba8749b97cf6bc08c4813770ce099fa7d9feb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1075, "license_type": "no_license", "max_line_length": 84, "num_lines": 32, "path": "/lesson1/country_dict.py", "repo_name": "arzhuch/python-advanced", "src_encoding": "UTF-8", "text": "'''\n2) Create a Country:Capital dictionary. Create a list of countries. Not all\nof the countries in the list have to match country names from the dictionary.\nUse the in operator to check whether a country from the list is present in\nthe dictionary, and if such a key does exist, print the capital.\n'''\nfrom pprint import pprint\n\ncountry_dict = {\n    \"Ukraine\": \"Kyiv\",\n    \"Poland\": \"Warsaw\",\n    \"Slovakia\": \"Bratislava\",\n    \"Turkey\": \"Istanbul\",\n    \"USA\": \"Washington\",\n    \"UK\": \"London\",\n    \"Germany\": \"Berlin\"\n}\n\nprint(f\"\\nDict: \\n\")\npprint(country_dict)\n\n\ncountry_list = [\"Poland\", \"Turkey\", \"Spain\", \"Portugal\", \"USA\", \"Germany\", \"Brazil\"]\n\nprint(f\"\\nList: \\n {country_list}\\n\")\n\nfor country in country_list:\n    if country in country_dict:\n        print(f\"{country_dict.get(country)} is the capital of {country}\")\n    else:\n        print(f\"{country}: no such country in the list!\")\n" }, { "alpha_fraction": 0.5151860117912292, "alphanum_fraction": 0.5174639225006104, "avg_line_length": 23.165138244628906, "blob_id": "acfc005508dd9001885f3bd5e8bb976b14c1", "content_id": "5468ace2683e245a53f531847d28fa48113f9c24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2634, "license_type": "no_license", "max_line_length": 115, "num_lines": 109, "path": "/lesson2/dot.py", "repo_name": "arzhuch/python-advanced", "src_encoding": "UTF-8", "text": "class Dot:\n\n    def __init__(self, x, y, z):\n\n        self._x = x\n        self._y = y\n        self._z = z\n\n    class Decorators:\n\n        @classmethod\n        def type_checker(cls, _type):\n\n            def proxy_decorator(decorated):\n\n                def wrapper(self, checked_value):\n                    print('decoration')\n                    if not isinstance(checked_value, _type):\n                        raise ValueError(f'Unsupported type passed while interacting with Dots: required {_type}')\n                    result = decorated(self, checked_value)\n                    print('successful decoration')\n                    return result\n\n
                return wrapper\n\n            return proxy_decorator\n\n    def get_x(self):\n        return self._x\n\n    @Decorators.type_checker((int, float))\n    def set_x(self, value):\n        self._x = value\n\n    def get_y(self):\n        return self._y\n\n    @Decorators.type_checker((int, float))\n    def set_y(self, value):\n        self._y = value\n\n    def get_z(self):\n        return self._z\n\n    @Decorators.type_checker((int, float))\n    def set_z(self, value):\n        self._z = value\n\n    # @Decorators.type_checker('Dot') - //TODO how to handle this ?\n    def __add__(self, other):\n\n        new_x = self._x + other._x\n        new_y = self._y + other._y\n        new_z = self._z + other._z\n\n        print('Returning sum of two Dots:\\n', new_x, new_y, new_z)\n\n        return Dot(new_x, new_y, new_z)\n\n    # @Decorators.type_checker(Dot)\n    def __mul__(self, other):\n\n        new_x = self._x * other._x\n        new_y = self._y * other._y\n        new_z = self._z * other._z\n\n        print('Returning product of two Dots:\\n', new_x, new_y, new_z)\n\n        return Dot(new_x, new_y, new_z)\n\n    # @Decorators.type_checker(Dot)\n    def __sub__(self, other):\n\n        new_x = self._x - other._x\n        new_y = self._y - other._y\n        new_z = self._z - other._z\n\n        print('Returning subtraction result of two Dots:\\n', new_x, new_y, new_z)\n\n        return Dot(new_x, new_y, new_z)\n\n    # @Decorators.type_checker(Dot)\n    def __truediv__(self, other):\n\n        new_x = self._x / other._x\n        new_y = self._y / other._y\n        new_z = self._z / other._z\n\n        print('Returning quotient of two Dots:\\n', new_x, new_y, new_z)\n\n        return Dot(new_x, new_y, new_z)\n\n    # @Decorators.type_checker(Dot)\n    def __neg__(self):\n\n        new_x = - self._x\n        new_y = - self._y\n        new_z = - self._z\n\n        print('Returning negative Dot:\\n', new_x, new_y, new_z)\n\n        return Dot(new_x, new_y, new_z)\n\n\na = Dot(1, 3, 5)\na.set_x(3)\nb = Dot(10, 10, 10)\nc = a + b\nprint(type(c))\n
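\n# Note on the TODO above (an added sketch, not the original author's solution):\n# the name Dot is not bound yet while the class body executes, and the string\n# 'Dot' cannot be used with isinstance. One workaround is to resolve the class\n# lazily inside the wrapper, e.g. isinstance(checked_value, type(self)), so the\n# same decorator could guard __add__ and the other operators as well.\n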
" }, { "alpha_fraction": 0.5812395215034485, "alphanum_fraction": 0.5929648280143738, "avg_line_length": 27.428571701049805, "blob_id": "c9e6df4e90a367cc9e2e18c2a7b8d7140f98960d", "content_id": "7968722ebe31655579cab61168ef7cd957565720", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2096, "license_type": "no_license", "max_line_length": 80, "num_lines": 63, "path": "/lesson3/counting_decorator.py", "repo_name": "arzhuch/python-advanced", "src_encoding": "UTF-8", "text": "'''Create a decorator that takes a 'number of repetitions' argument\nand calls the decorated function that many times.\nThe decorated function should return:\n1) the time spent on each call;\n2) the total time spent on all the calls;\n3) the name of the decorated function;\n4) the value of the last execution result.\n'''\n\nfrom time import time\nfrom pprint import pprint\n\ndef repetition(number_of_calls):\n\n    def proxy_decorator(func):\n\n        def wrapper(*args, **kwargs):\n\n            total_execution_time = 0\n            calls_time_data = {}\n            call_counter = 0\n\n            if not number_of_calls > 0:\n                raise ValueError(\"you can't call function zero times!\")\n\n            for i in range(number_of_calls):\n                call_counter += 1\n\n                print('function started...')\n                start = time()\n                result = func(*args, **kwargs)\n                end = time()\n                print('function ended')\n\n                single_execution_time = end - start\n                calls_time_data[f'Call # {call_counter}'] = single_execution_time\n                total_execution_time += single_execution_time\n\n            return {\n                'function result': result,\n                'function name': func.__name__,\n                'total execution_time': total_execution_time,\n                'execution time per call': calls_time_data,\n                'last execution time': single_execution_time,\n            }\n\n        return wrapper\n\n    return proxy_decorator\n\n\n@repetition(2)\ndef my_func(a, b, c=0):\n    return a + b + c\n\npprint(my_func(10, 20, 30))\n\n@repetition(0)\ndef string_func(string1, string2):\n    return string1 + string2\n\ntry:\n    pprint(string_func('petro', 'poroshenko'))\nexcept ValueError as error:\n    pprint(error)  # zero repetitions are rejected by the decorator\n" }, { "alpha_fraction": 0.5735294222831726, "alphanum_fraction": 0.6078431606292725, "avg_line_length": 15.692307472229004, "blob_id": "92845618f1fc306254f7a5fc5c4ac3cf5ba529193f", "content_id": "b6bd9dec5bc501a0fc1b9d0b373e711d2ab8ad7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "no_license", "max_line_length": 54, "num_lines": 13, "path": "/lesson1/create_list.py", "repo_name": "arzhuch/python-advanced", "src_encoding": "UTF-8", "text": "'''\n1) Create a list of N elements (from 0 to n with step 1).\nPrint all the even values in this list.\n'''\n\n\nlimit = 51\n\nmy_list = list(range(limit))\n\nfor i in my_list:\n    if i % 2 == 0:\n        print(i)\n" }, { "alpha_fraction": 0.622344970703125, "alphanum_fraction": 0.6299915313720703, "avg_line_length": 26.372093200683594, "blob_id": "9c455d1f5de5ae520b20f157a4a45be13048415f", "content_id": "9e3f34bc6d7af9d175ee97b2c477786a5c075659", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2559, "license_type": "no_license", "max_line_length": 88, "num_lines": 86, "path": "/lesson2/vehicles.py", "repo_name": "arzhuch/python-advanced", "src_encoding": "UTF-8", "text": "'''\n1) Create a vehicle class. Describe the common attributes. Create\nclasses for a passenger car and for a truck. Describe the base\nvehicle attributes in the base class. It will be a plus if the\nsubclasses override methods of the base class.\n'''\nfrom time import sleep\n\n\nclass Vehicle:\n\n    purpose = 'transportation'\n    number_of_wheels = 4\n\n    def __init__(self, manufacturer, color):\n\n        self.manufacturer = manufacturer\n        self.color = color\n        self.loaded_with = []\n        self.engine_started = False\n\n    def start_engine(self, delay=0):\n        self.engine_started = True\n        print('Starting engine... brr brr brrrrrr!!!... 
')\n sleep(delay)\n print(f'Engine started after {delay} seconds')\n\n def check_engine(self):\n print('Engine is working') if self.engine_started else print('Engine is silent')\n\n def load_into(self, stuff, *args):\n for a in args:\n print(f'{a} loaded into {self.color} {self.manufacturer}!')\n self.loaded_with.append(a)\n\n\nclass Truck(Vehicle):\n\n cargo_capacity_in_kg = 1500\n\n def change_capacity(self, delta):\n self.cargo_capacity_in_kg += delta\n print('New truck capacity is ', self.cargo_capacity_in_kg)\n\n def load_into(self, **kwargs): # designed to take item name and weight\n for item, weight in kwargs.items():\n print(f'Loaded {item}, weight = {weight} kg')\n self.loaded_with.append({item: weight})\n\n def check_load(self):\n print('Loaded with: ')\n print(self.loaded_with)\n\n\nclass Car(Vehicle):\n\n passanger_capacity = 4\n music_playing = None\n passangers_inside = ()\n\n def turn_on_music(self, song):\n self.music_playing = song\n print(song, 'is playing out of the car!')\n\n def load_into(self, **kwargs): # designed to take passanger's name and age\n for name, age in kwargs.items():\n print(f'{name}, {age} y.o., is inside the car')\n self.loaded_with.append({name: age})\n\n def check_load(self):\n print('Loaded with: \\n')\n for i in self.loaded_with:\n print(i)\n\n\n\n# just leaving it here\n\nmy_car = Car('Mustang', 'white')\nmy_truck = Truck('KAMAZ', 'blue')\n\nmy_car.load_into(artem=24, kostya=31, oleh=66)\nmy_car.check_load()\n\nmy_truck.load_into(furniture=50, food=35)\nmy_truck.check_load()\n" } ]
7
khalidzein/testrepo
https://github.com/khalidzein/testrepo
27bb8a946effb031b80a9fc58a38693b767dd7ae
0244b0ab302685fddcf8b4b3364239e223ee34a0
63c1b6d80478156fcdc886ab6c09f092496cdf35
refs/heads/main
2023-09-04T10:46:23.942197
2021-10-11T21:55:06
2021-10-11T21:55:06
416,074,739
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7241379022598267, "alphanum_fraction": 0.7241379022598267, "avg_line_length": 28, "blob_id": "d7f279c61e38b7e1884875e44a70ba2f184bf488", "content_id": "099e1dbf779428eddb2c553487c25117f783ae2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58, "license_type": "no_license", "max_line_length": 28, "num_lines": 2, "path": "/testchild.py", "repo_name": "khalidzein/testrepo", "src_encoding": "UTF-8", "text": "### Add file tp child branch\nprint(\"inside child branch\")\n" } ]
1
AntNowak/AttendanceMonitor
https://github.com/AntNowak/AttendanceMonitor
34229c919dd523f729578b428194bef78d45537f
fe54a1f37d954178b9c3f71de64c5e57faa2e581
d27d208771c95408c4f6a3e427e0725232138efa
refs/heads/master
2023-04-20T10:02:16.706240
2021-05-13T12:43:38
2021-05-13T12:43:38
336,783,727
1
1
null
2021-02-07T12:41:23
2021-05-13T11:46:32
2021-05-13T11:48:10
Python
[ { "alpha_fraction": 0.5045948028564453, "alphanum_fraction": 0.5249234437942505, "avg_line_length": 34.54081726074219, "blob_id": "edb1c24c0cb4627e556cf8dd17ec832194701a89", "content_id": "29b779f0acf656ca78c694ad7842eb0f082aadf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3591, "license_type": "no_license", "max_line_length": 112, "num_lines": 98, "path": "/AttendanceMonitor/train.py", "repo_name": "AntNowak/AttendanceMonitor", "src_encoding": "UTF-8", "text": "\r\nimport cv2\r\nimport random\r\nimport image_utils\r\nimport numpy as np\r\nfrom os import path, mkdir, listdir\r\n\r\nclass Train:\r\n\r\n def __init__ (self):\r\n self.cam = cv2.VideoCapture(0)\r\n\r\n def __del__(self):\r\n self.cam.release()\r\n\r\n #creating student directories based on supplied studentID\r\n def create_student_dir(self, student_id):\r\n user_data_dir = \"user_data\"\r\n if (not path.exists(user_data_dir)):\r\n mkdir(user_data_dir)\r\n user_dir = user_data_dir + \"/\" + student_id\r\n if (not path.exists(user_dir)):\r\n mkdir(user_dir)\r\n return user_dir\r\n\r\n def run_training(self):\r\n user_data_dir = \"user_data\"\r\n #finding user image directories\r\n user_dirs = listdir(user_data_dir)\r\n images = []\r\n ids = []\r\n #iterate the student IDs\r\n for u in user_dirs:\r\n user_files = listdir(user_data_dir + \"/\" + u)\r\n #iterating student images for IDs\r\n for f in user_files:\r\n image = cv2.imread(user_data_dir + \"/\" + u + \"/\" + f)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n ids.append(int(u))\r\n images.append(image)\r\n recogniser = cv2.face.LBPHFaceRecognizer_create()\r\n recogniser.train(images, np.array(ids))\r\n recogniser.write(\"student_train.yml\")\r\n\r\n #generate training set of student images, takes 30 images of the student when the face is detected\r\n def generate_training_set(self, student_id):\r\n student_dir = self.create_student_dir(student_id)\r\n number_of_images = 30\r\n saved_images = 0\r\n current_frame = 0\r\n\r\n while saved_images < number_of_images:\r\n ret, image = self.cam.read()\r\n if(current_frame % 10 == 0):\r\n if(not ret):\r\n return False\r\n image, bounding_box = image_utils.crop_and_greyscale(image)\r\n if(not image.size == 0):\r\n cv2.imwrite(student_dir + \"/\" + str(random.randint(0,50000))+ \".jpg\", image)\r\n print (\"image saved\")\r\n saved_images += 1\r\n current_frame += 1\r\n\r\nr = Train()\r\nr.generate_training_set(\"93\")\r\n#r.run_training()\r\n\r\n\r\n #def recognise_from_video():\r\n # recogniser = cv2.face.LBPHFaceRecognizer_create()\r\n # recogniser.read(\"student_train.yml\") \r\n # face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n\r\n # cap = cv2.VideoCapture(0)\r\n\r\n #while True:\r\n # ret, frame = cap.read()\r\n # faces = face_cascade.detectMultiScale(frame, 1.3, 5)\r\n # for (x, y, w, h) in faces:\r\n # image_grey = cv2.cvtColor(frame[y: y + h, x: x + w], cv2.COLOR_BGR2GRAY)\r\n # image_grey = cv2.resize(image_grey, (200, 200))\r\n # id, confidence = recogniser.predict(image_grey)\r\n # cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)\r\n #student_info = \"Student ID: \" + str(id) + \" (\" + str(confidence) + \"%)\"\r\n #display text\r\n #if (confidence > 0):\r\n # cv2.putText(frame, student_info, (x, y + h), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)\r\n # display\r\n #cv2.imshow('frame', frame)\r\n\r\n # esc to stop\r\n #k = cv2.waitKey(30) & 0xff\r\n #if k==27:\r\n # break\r\n #cap.release()\r\n 
#cv2.destroyAllWindows()\r\n\r\n    #recognise_from_video()\r\n    #cv2.destroyAllWindows()\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.579071581363678, "alphanum_fraction": 0.6050354242324829, "avg_line_length": 29.825000762939453, "blob_id": "28a6dcb9c5e64d7994f467a97e4e88a263cceac2", "content_id": "d2b08c6f617c18404659510112f04f23f54ecdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1271, "license_type": "no_license", "max_line_length": 62, "num_lines": 40, "path": "/AttendanceMonitor/image_utils.py", "repo_name": "AntNowak/AttendanceMonitor", "src_encoding": "UTF-8", "text": "import cv2\r\nimport numpy as np\r\nHAAR_CASCADE_PATH = \"haarcascade_frontalface_default.xml\"\r\n\r\n\r\ndef crop_and_greyscale(image):\r\n    cascade_class = cv2.CascadeClassifier(HAAR_CASCADE_PATH)\r\n    #change to greyscale\r\n    image_grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n    #detecting faces\r\n    faces = cascade_class.detectMultiScale(image_grey, 1.3, 5)\r\n\r\n    if(len(faces) > 0):\r\n        bounding_box = faces[0]\r\n        (x, y, w, h) = bounding_box\r\n        #crops\r\n        cropped_image = image_grey[y: y + h, x: x + w]\r\n        #resize\r\n        cropped_image = cv2.resize(cropped_image, (200, 200))\r\n        return cropped_image, bounding_box\r\n    \r\n    return np.array([]), None\r\n\r\ndef crop_colour(image):\r\n    cascade_class = cv2.CascadeClassifier(HAAR_CASCADE_PATH)\r\n    #change to greyscale\r\n    image_grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n    #detecting faces\r\n    faces = cascade_class.detectMultiScale(image_grey, 1.3, 5)\r\n\r\n    if(len(faces) > 0):\r\n        bounding_box = faces[0]\r\n        (x, y, w, h) = bounding_box\r\n        #crops\r\n        cropped_image = image[y: y + h, x: x + w]\r\n        #resize\r\n        cropped_image = cv2.resize(cropped_image, (200, 200))\r\n        return cropped_image, bounding_box\r\n    \r\n    return np.array([]), None" }, { "alpha_fraction": 0.7292817831039429, "alphanum_fraction": 0.7596685290336609, "avg_line_length": 24.571428298950195, "blob_id": "311d8c9c5c6aa7cc6f969a7fc5c4ac3cf5ba53e", "content_id": "09875aab31818a4a2076ad66b04722999b2daaf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 372, "license_type": "no_license", "max_line_length": 106, "num_lines": 14, "path": "/README.md", "repo_name": "AntNowak/AttendanceMonitor", "src_encoding": "UTF-8", "text": "# AttendanceMonitor\nLincoln Uni - G5 - TSE Project\n\nSystem to record attendance of students by facial recognition with an add-on system for facemask detection.\n\n•SQL for database supplied\n\n•Run app.py and head to 127.0.0.1:5000\n\n•To train face, run train.py\n\n•To train mask/unmasked, run mask_train.py\n\n•To add images to mask/unmasked dataset, run capture.py\n" }, { "alpha_fraction": 0.5080897808074951, "alphanum_fraction": 0.5367953777313232, "avg_line_length": 33.48147964477539, "blob_id": "934ee7ecb0318f6a1ebea044a165704a354a8f6f", "content_id": "b825a48544920caf7ebded47f547a55dd9d1165", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3832, "license_type": "no_license", "max_line_length": 107, "num_lines": 108, "path": "/AttendanceMonitor/recog.py", "repo_name": "AntNowak/AttendanceMonitor", "src_encoding": "UTF-8", "text": "import cv2\r\nimport image_utils\r\nimport base64\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\nclass MaskRecogniser:\r\n    def __init__ (self):\r\n        #initialising and loading training files\r\n        self.cam = None\r\n        json_file = open('model.json', 'r')\r\n        
loaded_model_json = json_file.read()\r\n json_file.close()\r\n self.model = tf.keras.models.model_from_json(loaded_model_json)\r\n # load weights into new model\r\n self.model.load_weights(\"model.h5\")\r\n print(\"Loaded model from disk\")\r\n self.active = True\r\n\r\n #self.model = tf.keras.models.load_model(\"mask_model\")\r\n\r\n def __del__(self):\r\n self.cam.release()\r\n \r\n def set_camera(self, camera):\r\n self.cam = camera\r\n\r\n #detecting mask\r\n def get_mask(self):\r\n ret, image = self.cam.read()\r\n if(not ret):\r\n print(\"FAILED TO GET IMAGE\")\r\n image_color, bounding_box = image_utils.crop_colour(image)\r\n if(not image_color.size == 0):\r\n rsize_image = cv2.resize(image_color, (200, 200))\r\n np_arr = np.array(rsize_image)\r\n np_arr = np.expand_dims(np_arr, axis = 0)\r\n prediction = self.model.predict(np_arr/255)\r\n\r\n #box for face detection\r\n (x, y, w, h) = bounding_box\r\n cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n\r\n student_info = \"Mask: \" + str(prediction)\r\n #masked - threshold for prediction if mask is on student\r\n if(prediction > 0.9999):\r\n cv2.putText(image, student_info, (x, y + h), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\r\n #unmasked\r\n else: \r\n cv2.putText(image, student_info, (x, y + h), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\r\n \r\n ret, image2 = cv2.imencode('.jpg', image)\r\n data = base64.b64encode(image2).decode(\"UTF-8\")\r\n return -1, prediction[0][0], data\r\n else:\r\n ret, image2 = cv2.imencode('.jpg', image)\r\n data = base64.b64encode(image2).decode(\"UTF-8\")\r\n return -1, -1, data\r\n\r\n\r\nclass Recogniser:\r\n #initialsing and loading training file\r\n def __init__ (self):\r\n self.cam = None\r\n self.recogniser = cv2.face.LBPHFaceRecognizer_create()\r\n self.recogniser.read(\"student_train.yml\")\r\n self.active = True\r\n\r\n def __del__(self):\r\n self.cam.release()\r\n\r\n def set_camera(self, camera):\r\n self.cam = camera\r\n\r\n def get_student_id(self):\r\n ret, image = self.cam.read()\r\n if(not ret):\r\n print(\"FAILED TO GET IMAGE\")\r\n image_grey, bounding_box = image_utils.crop_and_greyscale(image)\r\n if(not image_grey.size == 0):\r\n studentID, confidence = self.recogniser.predict(image_grey)\r\n #is_mask, confidence = self.mask_recogniser.predict(image_grey)\r\n\r\n #box for face detection\r\n (x, y, w, h) = bounding_box\r\n cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n\r\n student_info = \"Student ID: \" + str(studentID) + \" (\" + str(confidence) + \"%)\"\r\n cv2.putText(image, student_info, (x, y + h), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)\r\n \r\n ret, image = cv2.imencode('.jpg', image)\r\n data = base64.b64encode(image).decode(\"UTF-8\")\r\n return studentID, confidence, data\r\n else:\r\n ret, image = cv2.imencode('.jpg', image)\r\n data = base64.b64encode(image).decode(\"UTF-8\")\r\n return -1, -1, data\r\n\r\n#r = MaskRecogniser()\r\n\r\n#while True:\r\n# s, c, i = r.get_student_id()\r\n# cv2.imshow('video', i)\r\n# k = cv2.waitKey(30) & 0xff\r\n# if k == 27: # press 'ESC' to quit\r\n# break\r\n\r\ncv2.destroyAllWindows()\r\n" }, { "alpha_fraction": 0.5167856216430664, "alphanum_fraction": 0.5323817133903503, "avg_line_length": 37.18134689331055, "blob_id": "cefcd72a785d04760d98b1e7bc456b09add18f2a", "content_id": "e27a0e8908c4d6ba7e7e27249759fd374874f3d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7570, "license_type": "no_license", "max_line_length": 97, 
"num_lines": 193, "path": "/AttendanceMonitor/mask_train.py", "repo_name": "AntNowak/AttendanceMonitor", "src_encoding": "UTF-8", "text": "import tensorflow as tf\r\nimport cv2\r\nimport random\r\nimport image_utils\r\nimport numpy as np\r\nfrom os import path, mkdir, listdir\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Conv2D, MaxPooling2D\r\nfrom keras.layers import Activation, Dropout, Flatten, Dense\r\n\r\n#loading images of mask/unmasked dataset with validation images\r\nclass MaskTrainNN:\r\n def load_training_set2(self):\r\n train_datagen = ImageDataGenerator(\r\n rotation_range=40,\r\n width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n rescale=1./255,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True,\r\n fill_mode='nearest')\r\n\r\n test_datagen = ImageDataGenerator(rescale=1./255)\r\n\r\n #training set\r\n train_generator = train_datagen.flow_from_directory(\r\n 'temp_data/train', \r\n target_size=(200, 200), \r\n batch_size=4,\r\n class_mode='binary') \r\n\r\n #validation set\r\n validation_generator = test_datagen.flow_from_directory(\r\n 'temp_data/validation',\r\n target_size=(200, 200),\r\n batch_size=4,\r\n class_mode='binary')\r\n\r\n return train_generator, validation_generator\r\n\r\n\r\n def load_training_set(self):\r\n print(\"Loading Training Set\")\r\n mask_data_dir = \"mask_data\"\r\n masked_folder = \"masked\"\r\n unmasked_folder = \"unmasked\"\r\n\r\n #finding mask dataset directories\r\n mask_dirs = listdir(mask_data_dir + \"/\" + masked_folder)\r\n unmask_dirs = listdir(mask_data_dir + \"/\" + unmasked_folder)\r\n images = []\r\n ids = []\r\n masked_found = 0\r\n unmasked_found = 0\r\n\r\n #iterate the masked data set\r\n for u in mask_dirs:\r\n user_files = listdir(mask_data_dir + \"/\" + masked_folder + \"/\" + u) \r\n for f in user_files: \r\n #print(mask_data_dir + \"/\" + masked_folder + \"/\" + u + \"/\" + f)\r\n if('┼' in f):\r\n continue\r\n image = cv2.imread(mask_data_dir + \"/\" + masked_folder + \"/\" + u + \"/\" + f)\r\n image_color, _ = image_utils.crop_colour(image)\r\n if(not image_color.size == 0):\r\n ids.append([1, 0])\r\n images.append(image_color)\r\n masked_found += 1\r\n print(\"Masked: \" + str(masked_found))\r\n\r\n #iterate the unmasked data set\r\n for u in unmask_dirs: \r\n #print(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u)\r\n user_files = listdir(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u)\r\n for f in user_files:\r\n #print(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u + \"/\" + f)\r\n image = cv2.imread(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u + \"/\" + f)\r\n image_color, _ = image_utils.crop_colour(image)\r\n if(not image_color.size == 0):\r\n ids.append([0, 1])\r\n images.append(image_color)\r\n unmasked_found += 1\r\n print(\"Unmasked: \" + str(unmasked_found))\r\n\r\n print(\"Total Masked: \" + str(masked_found))\r\n print(\"Total Unmasked: \" + str(unmasked_found))\r\n np_im = np.array(images, dtype=np.float32)\r\n np_id = np.array(ids, dtype=np.float32)\r\n return np_im.astype(np.float32), np_id.astype(np.float32)\r\n\r\n #keras used for model\r\n def build_model(self):\r\n model = tf.keras.models.Sequential([\r\n tf.keras.layers.Conv2D(100, (3,3), activation='relu', input_shape=(200, 200, 3)),\r\n tf.keras.layers.MaxPooling2D(2,2),\r\n \r\n tf.keras.layers.Conv2D(100, (3,3), activation='relu'),\r\n tf.keras.layers.MaxPooling2D(2,2),\r\n \r\n tf.keras.layers.Flatten(),\r\n 
tf.keras.layers.Dropout(0.5),\r\n tf.keras.layers.Dense(50, activation='relu'),\r\n tf.keras.layers.Dense(1, activation='sigmoid')\r\n ])\r\n model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\r\n return model\r\n\r\n #run training for mask/unmask detection\r\n def run_training2(self):\r\n model = self.build_model()\r\n train_generator, validation_generator = self.load_training_set2()\r\n model.fit_generator(\r\n train_generator,\r\n epochs=50,\r\n validation_data=validation_generator)\r\n\r\n # serialize model to JSON\r\n model_json = model.to_json()\r\n with open(\"model.json\", \"w\") as json_file:\r\n json_file.write(model_json)\r\n # serialize weights to HDF5\r\n model.save_weights(\"model.h5\")\r\n print(\"Saved weights to disk\")\r\n #model.save(\"mask_model\")\r\n print(\"Done\")\r\n\r\n def run_training(self):\r\n model = self.build_model()\r\n x_train, y_train = self.load_training_set()\r\n print(\"Start Fitting\")\r\n history = model.fit(x_train, y_train, batch_size=64, epochs=1)\r\n\r\n # serialize model to JSON\r\n model_json = model.to_json()\r\n with open(\"model.json\", \"w\") as json_file:\r\n json_file.write(model_json)\r\n # serialize weights to HDF5\r\n model.save_weights(\"model.h5\")\r\n print(\"Saved model to disk\")\r\n\r\n scores = model.evaluate(x_train, y_train, verbose=0)\r\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\r\n #model.save(\"mask_model\")\r\n print(\"Done\")\r\n \r\n \r\n#----- OLD METHOD OF TRAINING --- NOT USED DUE TO .YML TOO BIG (11gb+ with supplied dataset)-----\r\n#https://github.com/X-zhangyang/Real-World-Masked-Face-Dataset - masked dataset source - NOT USED\r\nclass MaskTrain:\r\n def run_training(self):\r\n mask_data_dir = \"mask_data\"\r\n masked_folder = \"masked\"\r\n unmasked_folder = \"unmasked\"\r\n\r\n #finding mask dataset directories\r\n mask_dirs = listdir(mask_data_dir + \"/\" + masked_folder)\r\n unmask_dirs = listdir(mask_data_dir + \"/\" + unmasked_folder)\r\n images = []\r\n ids = []\r\n\r\n #iterate the masked data set\r\n for u in mask_dirs:\r\n user_files = listdir(mask_data_dir + \"/\" + masked_folder + \"/\" + u)\r\n for f in user_files:\r\n print(mask_data_dir + \"/\" + masked_folder + \"/\" + u + \"/\" + f)\r\n if('┼' in f):\r\n continue\r\n image = cv2.imread(mask_data_dir + \"/\" + masked_folder + \"/\" + u + \"/\" + f)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n ids.append(1)\r\n images.append(image)\r\n\r\n #iterate the unmasked data set\r\n for u in unmask_dirs:\r\n print(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u)\r\n user_files = listdir(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u)\r\n for f in user_files:\r\n print(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u + \"/\" + f)\r\n image = cv2.imread(mask_data_dir + \"/\" + unmasked_folder + \"/\" + u + \"/\" + f)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n ids.append(0)\r\n images.append(image)\r\n\r\n recogniser = cv2.face.LBPHFaceRecognizer_create()\r\n recogniser.train(images, np.array(ids))\r\n recogniser.write(\"mask_train.yml\")\r\n\r\nr = MaskTrainNN()\r\nr.run_training2()\r\n#r = MaskTrain()\r\n#r.run_training()\r\n\r\n\r\n" }, { "alpha_fraction": 0.5958254337310791, "alphanum_fraction": 0.6056050062179565, "avg_line_length": 35.6363639831543, "blob_id": "303e8a27c1ce503c64a9134ed52429633057cd2b", "content_id": "73367839b2c67f1062367a0095af644b6b4f7388", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
6851, "license_type": "no_license", "max_line_length": 212, "num_lines": 187, "path": "/AttendanceMonitor/app.py", "repo_name": "AntNowak/AttendanceMonitor", "src_encoding": "UTF-8", "text": "from threading import Lock\nfrom flask import Flask, render_template, request, session, redirect, url_for, g\nfrom flask_socketio import SocketIO, emit, disconnect\nfrom recog import Recogniser\nfrom recog import MaskRecogniser\nfrom flask_mysqldb import MySQL\nfrom datetime import date\nimport MySQLdb as MySQLAsync\nimport MySQLdb.cursors\nimport cv2\nimport datetime\nimport base64\n\n# Set this variable to \"threading\", \"eventlet\" or \"gevent\" to test the\n# different async modes, or leave it set to None for the application to choose\n# the best option based on installed packages.\nasync_mode = None\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app, async_mode=async_mode)\nthread = None\nthread_lock = Lock()\n\n#database connection\nmysql = MySQL()\nmysql.init_app(app)\n\napp.config['MYSQL_HOST'] = 'localhost'\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = ''\napp.config['MYSQL_DB'] = 'studentrecog'\n\ndef background_thread():\n #Intial state values\n state = 0\n found_student_id = -1\n found_confidence = -1\n socketio.emit('state_change', { 'new_state': 'face' })\n cam = cv2.VideoCapture(0)\n r = Recogniser()\n m = MaskRecogniser()\n m.set_camera(cam)\n r.set_camera(cam)\n wait_start = -1\n\n while True:\n socketio.sleep(0.03)\n #Face state - detecting studentID using recogniser\n if(state == 0):\n r1, r2, image = r.get_student_id()\n socketio.emit('image_data', { 'buffer': 'data:image/jpg;base64,'+image, 'student_id' : r1, 'confidence' : r2 , 'wait_timer' : '-1'})\n if(r2 < 90) and (r2 != -1 and r1 != -1):\n print(\"Found Student ID: \" + str(r1))\n found_student_id = r1\n found_confidence = r2\n state = 1\n socketio.emit('state_change', { 'new_state': 'mask' })\n #Mask state - detecting if student is masked using maskrecogniser\n elif(state == 1):\n _, r2, image = m.get_mask()\n socketio.emit('image_data', { 'buffer': 'data:image/jpg;base64,'+image, 'student_id' : found_student_id, 'confidence' : str(r2) , 'wait_timer' : '-1'})\n if(r2 >= 0.9999):\n state = 2\n socketio.emit('state_change', { 'new_state': 'update database' })\n #Database state - making database request to insert student into attendance register \n elif(state == 2):\n \n #Mysql in this case has to be manually connected, flask_mysqldb will not instantiate the connection\n mysql_async = MySQLAsync.connect(\"localhost\", \"root\", \"\", \"studentrecog\")\n cur = mysql_async.cursor()\n cur.execute(\"INSERT attendance_register SET Student_ID = \"+str(found_student_id)+\", Lecture_ID ='1', Present = '1'\")\n mysql_async.commit()\n cur.close()\n found_student_id = -1\n found_confidence = 0 \n socketio.emit('state_change', { 'new_state': 'wait' })\n state = 3\n #Waiting state - Without waiting state, system is too quick going back to face state\n elif(state == 3):\n #Use default camera image whilst we wait\n ret, image = cam.read()\n ret, image2 = cv2.imencode('.jpg', image)\n data = base64.b64encode(image2).decode(\"UTF-8\")\n\n if(wait_start == -1):\n wait_start = datetime.datetime.now()\n current_wait = (datetime.datetime.now() - wait_start).total_seconds()\n if(current_wait > 8):\n socketio.emit('state_change', { 'new_state': 'face' })\n state = 0\n wait_start = -1\n else:\n socketio.emit('image_data', { 'buffer': 'data:image/jpg;base64,'+data, 'wait_timer' : 
round(8-current_wait, 2)})\n\n\[email protected]_request\ndef before_request():\n g.user = None\n\n if 'user_id' in session:\n user = session['user_id'] #name of user needs to be taken from databse\n g.user = user\n\[email protected]('/')\ndef blank():\n return redirect(url_for('login'))\n\n#lecture render\[email protected]('/lectures')\ndef index():\n if session.get('logged_in') == True:\n return render_template('Lectures.html')\n else:\n return redirect(url_for('login'))\n \n#homepage render\[email protected]('/homepage')\ndef homepage():\n if session.get('logged_in') == True:\n currentDate = date.today()\n corDateT = currentDate.strftime(\"%Y-%m-%d %H:%M:%S\")\n cur = mysql.connection.cursor()\n cur.execute(\"SELECT Module_Name, Start_DateTime, End_DateTime FROM lectures INNER JOIN modules ON lectures.Module_ID=modules.Module_ID Where Start_DateTime >= %s ORDER BY Start_DateTime ASC\",(corDateT, ))\n mods = cur.fetchall()\n return render_template('Homepage.html', name=g.user, data=mods)\n else:\n return redirect(url_for('login'))\n\n#enroll render with database entry of student\[email protected]('/enroll', methods=['GET', 'POST'])\ndef enroll():\n if session.get('logged_in') == True:\n if request.method == \"POST\":\n details = request.form\n firstName = details['firstName']\n lastName = details['lastName']\n cur = mysql.connection.cursor()\n cur.execute(\"INSERT INTO Students(First_Name, Last_Name) VALUES (%s, %s)\", (firstName, lastName))\n mysql.connection.commit()\n cur.close()\n return redirect(url_for('index'))\n return render_template('Enroll.html')\n else:\n return redirect(url_for('login'))\n\n\n#login render with database connection for login information\[email protected]('/Login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n session.pop('user_id', None)\n username = request.form['username']\n password = request.form['password']\n cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n cursor.execute(\"SELECT * FROM Lecturers WHERE username = %s AND password = %s\",(username, password,))\n account = cursor.fetchone()\n if account:\n session['logged_in'] = True\n session['user_id'] = account['username']\n session['password'] = account['password']\n return redirect(url_for('homepage'))\n else:\n return redirect(url_for('login'))\n else:\n return render_template('Login.html')\n\[email protected]('/logout')\ndef logout():\n session.pop('user_id', None)\n session.pop('logged_in', None)\n return redirect(url_for('login'))\n\[email protected]\ndef connect():\n global thread\n with thread_lock:\n if thread is None:\n thread = socketio.start_background_task(background_thread)\n\[email protected]('disconnect')\ndef test_disconnect():\n print('Client disconnected', request.sid)\n\n\nif __name__ == '__main__':\n socketio.run(app, debug=True)\n" }, { "alpha_fraction": 0.546466588973999, "alphanum_fraction": 0.5706679821014404, "avg_line_length": 32.46666717529297, "blob_id": "c128e9ba9ab725f9f39ec1cb1ae21f4369fadf38", "content_id": "38483716c85746b3da728b017596f2737b60b99c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2066, "license_type": "no_license", "max_line_length": 88, "num_lines": 60, "path": "/AttendanceMonitor/capture.py", "repo_name": "AntNowak/AttendanceMonitor", "src_encoding": "UTF-8", "text": "import cv2\r\nimport image_utils\r\nimport base64\r\nimport numpy as np\r\nimport os\r\nimport random\r\n\r\nclass Sampler:\r\n def __init__ (self):\r\n self.cam = 
cv2.VideoCapture(0)\r\n\r\n def __del__(self):\r\n self.cam.release()\r\n #creating temp directories for dataset\r\n def create_dirs(self):\r\n os.mkdir(\"temp_data\")\r\n os.mkdir(\"temp_data/masked\")\r\n os.mkdir(\"temp_data/unmasked\")\r\n #saving images taken \r\n def save_image(self, im, masked = False):\r\n number = random.randint(0, 100000)\r\n if(masked):\r\n cv2.imwrite(\"temp_data/masked/\" + str(number) + \".jpg\", im)\r\n else:\r\n cv2.imwrite(\"temp_data/unmasked/\" + str(number) + \".jpg\", im)\r\n return\r\n\r\n #detecting face then displaying box around detected face\r\n def get_image(self):\r\n ret, image = self.cam.read()\r\n if(not ret):\r\n print(\"FAILED TO GET IMAGE\")\r\n image_color, bounding_box = image_utils.crop_colour(image)\r\n if(not image_color.size == 0):\r\n rsize_image = cv2.resize(image_color, (200, 200))\r\n (x, y, w, h) = bounding_box\r\n cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)\r\n return 1, rsize_image, image\r\n else:\r\n return -1, None, image\r\n\r\nr = Sampler()\r\nr.create_dirs()\r\ncv2.namedWindow('frame', cv2.WINDOW_NORMAL)\r\n#controls to take images after detecting face (only when box (detected face) is present)\r\nwhile True:\r\n success, r_image, i = r.get_image()\r\n cv2.imshow('video', i)\r\n k = cv2.waitKey(30) & 0xff\r\n if k == 27: # press 'ESC' to quit\r\n break\r\n if k == 97 and success == 1: # press 'A' to save unmasked\r\n print(\"Saving Unmasked Image\")\r\n r.save_image(r_image, False)\r\n elif k == 98 and success == 1: # press 'B' to save masked\r\n print(\"Saving Masked Image\")\r\n r.save_image(r_image, True)\r\n #no face detected whilst pressing A or B (no box around face) - nothing saved\r\n elif k == 97 or k == 98:\r\n print(\"Failed To Find Face\")" }, { "alpha_fraction": 0.5236570239067078, "alphanum_fraction": 0.6565729975700378, "avg_line_length": 43.80666732788086, "blob_id": "c2bbfcdce0dc79e7b2512e5a220586b3fc9ad0ea", "content_id": "99d586537ced5c21e23e88349e9ba29aff2b7b83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 6869, "license_type": "no_license", "max_line_length": 177, "num_lines": 150, "path": "/AttendanceMonitor/System-Database.sql", "repo_name": "AntNowak/AttendanceMonitor", "src_encoding": "UTF-8", "text": "DROP DATABASE IF EXISTS StudentRecog;\r\n\r\nCREATE DATABASE StudentRecog;\r\n\r\nCREATE TABLE StudentRecog.School (\r\n\tSchool_ID int(11) NOT NULL,\r\n\tSchool_Name varchar(30),\r\n\tAddress_Line_1 varchar(30),\r\n\tAddress_Line_2 varchar(30),\r\n\tPostcode varchar(8),\r\n\tPhone_num int(15),\r\n\tPRIMARY KEY(School_ID)\r\n);\r\n\r\nCREATE TABLE StudentRecog.Courses (\r\n\tCourse_ID int(11) NOT NULL AUTO_INCREMENT,\r\n\tSchool_ID int(11),\r\n\tCourse_Name varchar(30),\r\n\tCourse_Level int(1),\r\n\tStart_Date date,\r\n\tEnd_Date date,\r\n\tPRIMARY KEY(Course_ID),\r\n\tFOREIGN KEY(School_ID) REFERENCES School(School_ID)\r\n)AUTO_INCREMENT=1;\r\n\r\nCREATE TABLE StudentRecog.Students (\r\n\tStudent_ID int(11) NOT NULL AUTO_INCREMENT,\r\n\tFirst_Name varchar(30),\r\n\tLast_Name varchar(30),\r\n\tDOB date,\r\n\tGender varchar(15) NOT NULL,\r\n\tAddress_Line_1 varchar(30),\r\n\tAddress_Line_2 varchar(30),\r\n\tPostcode varchar(8),\r\n\tPhone_Num int(15),\r\n\tEmail_Address varchar(40) DEFAULT NULL,\r\n\tMask_Exemption BOOLEAN NOT NULL,\r\n\tCourse_ID int,\r\n\tPRIMARY KEY(Student_ID),\r\n\tFOREIGN KEY(Course_ID) REFERENCES Courses(Course_ID)\r\n)AUTO_INCREMENT=88;\r\n\r\nCREATE TABLE StudentRecog.Modules (\r\n\tModule_ID 
int(11) NOT NULL AUTO_INCREMENT,\r\n\tCourse_ID int(11),\r\n\tModule_Name varchar(30),\r\n\tStart_Date date,\r\n\tEnd_Date date,\r\n\tPRIMARY KEY(Module_ID),\r\n\tFOREIGN KEY(Course_ID) REFERENCES Courses(Course_ID)\r\n)AUTO_INCREMENT=1;\r\n\r\nCREATE TABLE StudentRecog.Lecturers (\r\n\tLecturer_ID int(11) NOT NULL AUTO_INCREMENT,\r\n\tLecturer_First_Name varchar(30),\r\n\tLecturer_Last_Name varchar(30),\r\n\tDOB date,\r\n\tGender varchar(15) NOT NULL,\r\n\tAddress_Line_1 varchar(30),\r\n\tAddress_Line_2 varchar(30),\r\n\tPostcode varchar(8),\r\n\tPhone_Num int(15),\r\n\tEmail_Address varchar(40) DEFAULT NULL,\r\n\tusername varchar(45),\r\n    password varchar(90),\r\n\tPRIMARY KEY(Lecturer_ID)\r\n)AUTO_INCREMENT=1;\r\n\r\nCREATE TABLE StudentRecog.Lectures (\r\n\tLecture_ID int(11) NOT NULL AUTO_INCREMENT,\r\n\tModule_ID int(11),\r\n\tLecturer_ID int,\r\n\tStart_DateTime datetime(6),\r\n\tEnd_DateTime datetime(6),\r\n\tPRIMARY KEY(Lecture_ID),\r\n\tFOREIGN KEY(Module_ID) REFERENCES Modules(Module_ID),\r\n\tFOREIGN KEY(Lecturer_ID) REFERENCES Lecturers(Lecturer_ID)\t\r\n)AUTO_INCREMENT=1;\r\n\r\nCREATE TABLE StudentRecog.Attendance_Register (\r\n\tRegister_ID int (11) NOT NULL AUTO_INCREMENT,\r\n\tStudent_ID int(11),\r\n\tLecture_ID int(11),\r\n\tPresent boolean,\r\n\tPRIMARY KEY(Register_ID),\r\n\tFOREIGN KEY(Student_ID) REFERENCES Students(Student_ID),\r\n\tFOREIGN KEY(Lecture_ID) REFERENCES Lectures(Lecture_ID)\r\n)AUTO_INCREMENT=1;\r\n\r\n\r\nINSERT INTO StudentRecog.School (School_ID, School_Name, Address_Line_1, Address_Line_2, Postcode, Phone_Num)\r\nvalues\r\n('1', 'The University of Lincoln', 'Brayford Way', 'Brayford Pool', 'LN61 7TS', '0774522628');\r\n\r\nINSERT INTO StudentRecog.Courses (School_ID, Course_Name, Course_Level, Start_Date, End_Date)\r\nvalues\r\n('1', 'Computer Science', '1', '2020/08/15', '2023/05/15'),\r\n('1', 'Computer Science', '2', '2020/08/15', '2023/05/15'),\r\n('1', 'Computer Science', '3', '2020/08/15', '2023/05/15');\r\n\r\nINSERT INTO StudentRecog.Students (First_Name, Last_Name, DOB, Gender, Address_Line_1, Address_Line_2, Postcode, Phone_Num, Email_Address, Mask_Exemption, Course_ID)\r\nvalues\r\n('James','Smith','1996/11/24','Male','Address line 1','Address line 2', 'PH51 6EL', '0773917498', '[email protected]', false, '1'),\r\n('Sarah','Wood','1995/12/19','Female','Address line 1','Address line 2', 'GK24 6DF', '0778267492', '[email protected]', true, '1'),\r\n('Harrison','Goddard','1990/09/13','Male','Address line 1','Address line 2', 'DL37 2DQ', '0779283027', '[email protected]', false, '1'),\r\n('Jimmy','Smith','2001/11/12','Male','Address line 1','Address line 2', 'SK21 1KS', '0777839478', '[email protected]', false, '1'),\r\n('Max', 'West', '1999/04/21', 'Male', 'Address line 1', 'Address line 2', 'LD32 2DA', '0772917846', '[email protected]', false, '2'),\r\n('Sally', 'Weaton', '2000/11/18', 'Female', 'Address line 1', 'Address line 2', 'DL29 2DK', '0778176392', '[email protected]', false, '2'),\r\n('Fenton', 'Alan', '1995/04/12', 'Male', 'Address line 1', 'Address line 2', 'DK18 1SK', '0777163615', '[email protected]', true, '2'),\r\n('Louise', 'Western', '1992/06/19', 'Female', 'Address line 1', 'Address line 2', 'SO29 2SL', '0773912738', '[email protected]', false, '2'),\r\n('Claire', 'Dunn', '2000/07/18', 'Female', 'Address line 1', 'Address line 2', 'SL12 3SD', '0779327103', '[email protected]', true, '3'),\r\n('Charlie', 'Maxwell', '2002/11/16', 'Male', 'Address line 1', 'Address line 2', 'SJ13 1SL', '0772839173', '[email protected]', 
true, '3'),\r\n('Alex', 'Westbrook', '1993/11/21', 'Male', 'Address line 1', 'Address line 2', 'DL23 5SD', '0771937162', '[email protected]', true, '3'),\r\n('Matthew', 'Broughton', '2002/07/12', 'Male', 'Address line 1', 'Address line 2', 'AL21 1SL', '0771836182', '[email protected]', true, '3');\r\n\r\nINSERT INTO StudentRecog.Modules (Course_ID, Module_Name, Start_Date, End_Date)\r\nvalues\r\n('1', 'Programming Fundamentals', '2020/08/15', '2020/12/03'),\r\n('1', 'Algorithms and Complexity', '2020/08/15', '2020/12/13'),\r\n('1', 'Computer Architectures', '2020/12/03', '2021/03/18'),\r\n('1', 'Maths for Computing', '2020/12/13', '2021/05/28'),\r\n('1', 'Problem Solving', '2020/08/01', '2021/02/14'),\r\n('2', 'Advanced Programming', '2020/08/15', '2020/12/03'),\r\n('2', 'Artificial Intelligence', '2020/08/15', '2020/12/13'),\r\n('2', 'Network Fundamentals', '2020/12/03', '2021/05/26'),\r\n('2', 'Scalable Databases', '2020/12/13', '2021/05/28'),\r\n('2', 'User Experience', '2020/12/01', '2021/05/24'),\r\n('3', 'Big Data', '2020/08/15', '2020/12/03'),\r\n('3', 'Machine Learning', '2020/08/15', '2020/12/13'),\r\n('3', 'Parallel Programming', '2020/12/03', '2021/05/25'),\r\n('3', 'Project', '2020/12/13', '2021/05/21'),\r\n('3', 'Option', '2020/08/01', '2021/02/14');\r\n\r\nINSERT INTO StudentRecog.Lecturers (Lecturer_First_Name, Lecturer_Last_Name, DOB, Gender, Address_Line_1, Address_Line_2, Postcode, Phone_Num, Email_Address, username, password)\r\nvalues\r\n('John', 'Smith', '1976/04/23', 'Male', 'Address line 1', 'Address line 2', 'LS21 2DQ', '0771936789', '[email protected]', 'jsmith', 'password1'),\r\n('Sarah', 'Johnson', '1986/11/18', 'Female', 'Address line 1', 'Address line 2', 'LN12 2FD', '0778273017', '[email protected]', 'sjohnson', 'password2'),\r\n('Jim', 'Eastman', '1960/04/13', 'Male', 'Address line 1', 'Address line 2', 'AJ27 2AW', '0771937461', '[email protected]', 'jeastman', 'password3'),\r\n('Adam', 'Sharp', '1979/08/18', 'Male', 'Address line 1', 'Address line 2', 'SK23 1FJ', '0778163846', '[email protected]', 'asharp', 'password4'),\r\n('Laura', 'Green', '1976/11/20', 'Female', 'Address line 1', 'Address line 2', 'KD13 5DF', '0772936718', '[email protected]', 'lgreen', 'password5');\r\n\r\nINSERT INTO StudentRecog.Lectures (Module_ID, Lecturer_ID, Start_DateTime, End_DateTime)\r\nvalues\r\n('4', '1', '2021-05-28 08:00:00', '2021-05-28 10:00:00'),\r\n('13', '4', '2021-05-27 08:00:00', '2021-05-27 10:00:00'),\r\n('10', '5', '2021-05-26 08:00:00', '2021-05-26 10:00:00');\r\n\r\nINSERT INTO StudentRecog.Attendance_Register ()\r\nvalues\r\n();" } ]
8
kl738/codeforces
https://github.com/kl738/codeforces
b7047b5a80dfc642debe927dbfc42249e95b6bf3
6e4501130738f39139009a097352f27c2282c595
7e3d7f671b0ae5bb8a8a7edeef32b126d316dae7
refs/heads/master
2020-03-23T21:31:14.057503
2019-07-03T02:07:15
2019-07-03T02:07:15
142,112,930
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4861878454685211, "alphanum_fraction": 0.49723756313323975, "avg_line_length": 16.238094329833984, "blob_id": "b6b2012eb5fda4142e0ef06849c0495264b1c716", "content_id": "eb6a7ac94b69433cc072eff2902e113f7fc47248", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 362, "license_type": "no_license", "max_line_length": 62, "num_lines": 21, "path": "/educational/1/b.cpp", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <string>\nusing namespace std;\n\nint main(){\n\tstring s;\n\tint m;\n\tint l, r, k;\n\tint length;\n\tint net;\n \tcin >> s; \n\tcin >> m;\n\tfor(int i = 0; i < m; ++i){\n\t\tcin >> l >> r >> k;\n\t\tlength = r - l + 1;\n\t\tnet = k % length;\n\t\tif(net == 0) net = length;\n\t\trotate(s.begin() + l-1, s.begin() + r-net, s.begin() + r);\t\t\n\t}\n\tcout << s << \"\\n\";\n}\n" }, { "alpha_fraction": 0.43870967626571655, "alphanum_fraction": 0.47096773982048035, "avg_line_length": 16.884614944458008, "blob_id": "0717b08a5870a069c33a9ee9a6701171815c44a4", "content_id": "1828c1465568987b8bd6c18e7ed52f5082faf052", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "no_license", "max_line_length": 50, "num_lines": 26, "path": "/493/B.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n, b = map(int,input().split())\nv = list(map(int,input().split()))\n\ncosts = []\n\nodds = 0\nevens = 0\nfor i in range(n-1):\n if v[i] % 2 == 0:\n evens += 1\n else:\n odds += 1\n if odds == evens and odds != 0 and i != (n-1):\n costs.append(abs(v[i+1]-v[i]))\n odds = 0\n evens = 0\n\ncount = 0\nacc = 0\nfor cost in sorted(costs):\n if acc + cost <= b:\n count += 1\n acc = acc + cost\n else:\n break\nprint(count)\n" }, { "alpha_fraction": 0.5227963328361511, "alphanum_fraction": 0.5501520037651062, "avg_line_length": 16.342105865478516, "blob_id": "6a68b7844a6c3f088aeb41923699feb76f8a4b9f", "content_id": "f93dfc6f1403b0c1df13b848350f8b486bdcc0a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 658, "license_type": "no_license", "max_line_length": 41, "num_lines": 38, "path": "/499/D.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "from sys import stdin, stdout\nm, n = map(int, stdin.readline().split())\n\ntruths = [True] * n \nfor i in range(n):\n\tstdout.write(\"1\\n\")\n\tstdout.flush()\n\tres = stdin.readline().strip()\n\tif res == \"0\":\n\t\texit()\n\telif res == \"1\":\n\t\ttruths[i] = True\n\telif res == \"-1\":\n\t\ttruths[i] = False\n\ndiv = 30 // n\ntruths = truths * (div + 1)\n\nlow, high = 1, m\nfor i in range(30):\n\tmid = (low+high+1)//2\n\tstdout.write(str(mid)+\"\\n\")\n\tstdout.flush()\n\tres = stdin.readline().strip()\n\tif res == \"0\":\n\t\texit()\n\tif res == \"-2\":\n\t\texit()\n\tif truths[i]:\n\t\tif res == \"1\":\n\t\t\tlow = mid\n\t\telse:\n\t\t\thigh = mid -1\n\tif not truths[i]:\n\t\tif res == \"1\":\n\t\t\thigh = mid -1\n\t\telse:\n\t\t\tlow = mid" }, { "alpha_fraction": 0.3698224723339081, "alphanum_fraction": 0.3757396340370178, "avg_line_length": 15.899999618530273, "blob_id": "841758b89b019380230ee2b1d79dcd2067e370ca", "content_id": "bd1a3b4a667087214329c5d8405a497a62eba734", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 338, "license_type": "no_license", "max_line_length": 33, "num_lines": 20, "path": 
"/problems/1092A.cpp", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "#include <bits/stdc++.h>\nusing namespace std;\n\nchar getChar(int x){\n return (char)('a' + x);\n}\nint main(){\n int t;\n cin >> t;\n for(int i = 0; i < t; i++){\n int n, k;\n cin >> n >> k;\n int p = 0;\n while(p<n){\n cout << getChar(p%k);\n p++;\n }\n cout << \"\\n\";\n }\n}\n" }, { "alpha_fraction": 0.4000000059604645, "alphanum_fraction": 0.446153849363327, "avg_line_length": 15.25, "blob_id": "356f5d0548bfa772468bd854647bc0e34f155e28", "content_id": "9b7085248c10bf7766f446f485c4f682750638d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "no_license", "max_line_length": 34, "num_lines": 16, "path": "/493/A.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n = int(input())\nv = list(map(int,input().split()))\n\nif n == 1:\n print(-1)\nelif n == 2:\n if v[0] == v[1]:\n print(-1)\n else:\n print(1)\n print(1)\nelif n > 2:\n new = sorted(v)\n gs = new[0]\n print(1)\n print(v.index(gs)+1)\n" }, { "alpha_fraction": 0.4947839081287384, "alphanum_fraction": 0.5424739122390747, "avg_line_length": 17.16216278076172, "blob_id": "f6a45f3288c087030fd95f3c9f0770b4089a348b", "content_id": "a36c477ef29d9e7e2818e6166db76a8a1dd3f6c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 671, "license_type": "no_license", "max_line_length": 58, "num_lines": 37, "path": "/498/D.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n = int(input())\ns1 = input()\ns2 = input()\n\nlow, high = 0, n-1\n\ngcount = 0\nwhile high >= low:\n\td = {}\n\tlst = []\n\tif high > low:\n\t\tlst.append(s1[low])\n\t\tlst.append(s1[high])\n\t\tlst.append(s2[low])\n\t\tlst.append(s2[high])\n\telif high == low:\n\t\tlst.append(s1[low])\n\t\tlst.append(s2[low])\n\tfor c in lst:\n\t\tif d.get(c) == None:\n\t\t\td[c] = 1\n\t\telse:\n\t\t\td[c] += 1\n\tif len(lst) == 2 and 1 in d.values():\n\t\tgcount += 1\n\telif len(lst) == 4 and 3 in d.values():\n\t\tgcount += 1\n\telif len(lst) == 4 and len(d) == 3 and s1[low]==s1[high]:\n\t\tgcount += 2\n\telif len(lst) == 4 and len(d) == 3:\n\t\tgcount += 1\n\telif len(lst) == 4 and len(d) == 4:\n\t\tgcount += 2\n\thigh -= 1\n\tlow += 1\n\nprint(gcount)" }, { "alpha_fraction": 0.4575645625591278, "alphanum_fraction": 0.49077489972114563, "avg_line_length": 12.600000381469727, "blob_id": "27459686e7a9e2d5733d2bd0bc6fcb4e217761f6", "content_id": "5b9786e7c3aad5d4d630c120ae4c71624f4beb4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 47, "num_lines": 20, "path": "/259/A.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n = int(input())\n\nspacing = n//2\nnd = 1\n\nwhile nd < n:\n\trow = \"*\" * spacing + \"D\" * nd + \"*\" * spacing\n\tprint(row)\n\tspacing -= 1\n\tnd += 2\n\nprint(\"D\"*n)\nnd -= 2\nspacing +=1\n\nwhile nd >= 1:\n\trow = \"*\" * spacing + \"D\" * nd + \"*\" * spacing\n\tprint(row)\n\tspacing += 1\n\tnd += -2" }, { "alpha_fraction": 0.5451807379722595, "alphanum_fraction": 0.5542168617248535, "avg_line_length": 14.136363983154297, "blob_id": "e4a235e85a89089cb5c7e00b1054844e5210c18c", "content_id": "0c88349fcc1ca4677bc606b99d8d3925f0450454", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 332, "license_type": "no_license", 
"max_line_length": 34, "num_lines": 22, "path": "/499/E.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n, k = map(int,input().split())\nv = list(map(int,input().split()))\n\ndef gcd(a,b):\n\tif a < b:\n\t\treturn gcd(b,a)\n\tif b == 0:\n\t\treturn a\n\telse:\n\t\treturn gcd(b, a%b) \n\ng = v[0]\nfor i in range(1,n):\n\tg = gcd(g, v[i])\n\nlst = set()\nfor i in range(k):\n\tlst.add(g*i % k)\n\nlst = sorted(list(lst))\nprint(len(lst))\nprint(' '.join(map(str,lst)))" }, { "alpha_fraction": 0.3801169693470001, "alphanum_fraction": 0.39376217126846313, "avg_line_length": 15.54838752746582, "blob_id": "c36a982f480a0fc43eb8c5864789b398103d24f3", "content_id": "27933904eb2608bb19b0d991030f6da92a3dd626", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 513, "license_type": "no_license", "max_line_length": 33, "num_lines": 31, "path": "/problems/1097A.cpp", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "#include <bits/stdc++.h>\nusing namespace std;\n\nint main(){\n string myCard;\n cin >> myCard;\n char myR = myCard[0];\n char myS = myCard[1];\n int i = 0;\n string input;\n char r, s;\n bool flag = false;\n while(i < 5){\n cin >> input;\n r = input[0];\n s = input[1];\n if(myR == r || myS == s){\n flag = true;\n }\n i += 1;\n }\n if(flag == true){\n cout << \"YES\" << \"\\n\";\n }\n else{\n cout << \"NO\" << \"\\n\";\n }\n\n\n \n}\n" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.46666666865348816, "avg_line_length": 14, "blob_id": "9cecab32f52ca90907fac406fcee173535f13593", "content_id": "b030ade0d3f39efdd0f5da04d1e5b5afbeb84555", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15, "license_type": "no_license", "max_line_length": 14, "num_lines": 1, "path": "/401/test.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "print(\"t\">\"s\")\n" }, { "alpha_fraction": 0.5079365372657776, "alphanum_fraction": 0.5428571701049805, "avg_line_length": 20, "blob_id": "c71756b5daf35c71b5157628a368f441656b3139", "content_id": "eb0ec943a7ce5df0b01d6ec52bd9588691335497", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 315, "license_type": "no_license", "max_line_length": 41, "num_lines": 15, "path": "/493/flush.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "import sys\n\nlow, high = 1, 1000000\n\nwhile low != high:\n mid = (low + high + 1)//2\n sys.stdout.write(str(mid)+\"\\n\")\n sys.stdout.flush()\n result = sys.stdin.readline().strip()\n if result == \"<\":\n high = mid - 1\n else:\n low = mid\nsys.stdout.write(\"! 
\"+str(low)+\"\\n\")\nsys.stdout.flush()\n" }, { "alpha_fraction": 0.34343433380126953, "alphanum_fraction": 0.36868685483932495, "avg_line_length": 18.83333396911621, "blob_id": "2a489ab9ceeb350752d0513078ef25edf5ad1a76", "content_id": "ecf401c197f3a642cdc3fbd66ee5117d10b89a28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 594, "license_type": "no_license", "max_line_length": 50, "num_lines": 30, "path": "/problems/1088B.cpp", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "#include <bits/stdc++.h>\nusing namespace std;\n\nint main(){\n int n,k;\n cin >> n >> k;\n int a[n];\n set<int> s;\n for(int i = 0; i < n; i++){\n cin >> a[i];\n s.insert(a[i]);\n }\n vector<int> a2;\n for(auto it = s.begin(); it != s.end(); it++){\n a2.push_back(*it);\n }\n sort(a2.begin(),a2.begin());\n for(int i = 0; i < a2.size(); i++){\n if(k==0) break;\n if(i==0) cout << a2[i] << \"\\n\";\n else{\n cout << a2[i]-a2[i-1] << \"\\n\";\n }\n k--;\n }\n while(k>0){\n cout << 0 << \"\\n\";\n k--;\n }\n}" }, { "alpha_fraction": 0.5539568066596985, "alphanum_fraction": 0.568345308303833, "avg_line_length": 22.965517044067383, "blob_id": "5d4d580b888436b8c0bba9932639b2ab138943b4", "content_id": "852e63566780112692b8b85e9cd152e30f6d2eb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 695, "license_type": "no_license", "max_line_length": 59, "num_lines": 29, "path": "/401/C.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "import sys\nn,m = map(int,sys.stdin.readline().rstrip().split())\nmatrix = []\nfor _ in range(n):\n\trow = list(map(int,sys.stdin.readline().rstrip().split()))\n\tmatrix.append(row)\n\n# initialize table\nA = [[1 for _ in range(m)] for _ in range(n)]\nfor i in range(n-2,-1,-1):\n\tfor j in range(m):\n\t\tif matrix[i][j] <= matrix[i+1][j]:\n\t\t\tA[i][j] = A[i+1][j] + 1\n\n# rows = [0] * n\n# for i in range(n):\n# \tlst = [A[i][j] for j in range(m)]\n# \trows[i] = max(lst)\nrows = [max(row) for row in A]\n\nans = \"\"\nk = int(sys.stdin.readline().rstrip())\nfor _ in range(k):\n\tl,r = map(int,sys.stdin.readline().rstrip().split())\n\tif rows[l-1] >= (r - l + 1):\n\t\tans += \"Yes\\n\"\n\telse:\n\t\tans += \"No\\n\"\nsys.stdout.write(ans)\n" }, { "alpha_fraction": 0.5245398879051208, "alphanum_fraction": 0.546012282371521, "avg_line_length": 13.863636016845703, "blob_id": "94d55ae3393c0241dd609055e4f1ea705bc2cffe", "content_id": "c6b23f9ad4aba3f4c8488d4091bed92e33f9b5f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 326, "license_type": "no_license", "max_line_length": 36, "num_lines": 22, "path": "/499/A2.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n,k = map(int,input().split())\ns = input()\n\ns = ''.join(sorted(s))\ndef cost(c):\n\treturn ord(c) - ord('a') + 1\n\ncurr = 0\ntotal = cost(s[curr])\ncount = 1\n\nfor i in range(1,n):\n\tif count == k:\n\t\tbreak\n\tif cost(s[i]) - cost(s[curr]) >= 2:\n\t\ttotal += cost(s[i])\n\t\tcount += 1\n\t\tcurr = i\nif count == k:\n\tprint(total)\nelse:\n\tprint(-1)" }, { "alpha_fraction": 0.445333331823349, "alphanum_fraction": 0.4866666793823242, "avg_line_length": 19.2702693939209, "blob_id": "34e20d0bcc8b0ee080e64e7b512533fc961ef24f", "content_id": "12c743dfb4a3e6d89eb0b2f826edfb592ae750a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 750, "license_type": 
"no_license", "max_line_length": 42, "num_lines": 37, "path": "/340/D.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "x1,y1 = map(int,input().split())\nx2,y2 = map(int,input().split())\nx3,y3 = map(int,input().split())\n\npx = [x1,x2,x3]\npy = [y1,y2,y3]\nx = set(px)\ny = set(py)\n\nif len(x) == 1 or len(y) == 1:\n print(1)\nelif len(x) == 2 and len(y) == 2:\n print(2)\nelif len(x) == 2:\n wanted = -1\n for i in range(3):\n if px.count(px[i]) == 1:\n wanted = i\n value = py[wanted]\n py.pop(wanted)\n if value > max(py) or value < min(py):\n print(2)\n else:\n print(3)\nelif len(y) ==2:\n wanted = -1\n for i in range(3):\n if py.count(py[i]) == 1:\n wanted = i\n value = px[wanted]\n px.pop(wanted)\n if value > max(px) or value < min(px):\n print(2)\n else:\n print(3)\nelse:\n print(3)\n" }, { "alpha_fraction": 0.2598870098590851, "alphanum_fraction": 0.299435019493103, "avg_line_length": 15.488371849060059, "blob_id": "351581aa54fd76909cfe3e64684a4ef49661878a", "content_id": "119d12d74e4856958a199018f30004c80ea855ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 708, "license_type": "no_license", "max_line_length": 44, "num_lines": 43, "path": "/problems/1099A.cpp", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "#include <bits/stdc++.h>\nusing namespace std;\n\nint main(){\n int sw, sh;\n int w1, h1;\n int w2, h2;\n cin >> sw >> sh >> w1 >> h1 >> w2 >> h2;\n if(h1 > h2){\n while(sh >= h1){\n sw += sh;\n sh -= 1;\n }\n sw = max(0, sw-w1);\n while(sh >= h2){\n sw += sh;\n sh -= 1;\n }\n sw = max(0, sw-w2);\n }\n else{\n while(sh >= h2){\n sw += sh;\n sh -= 1;\n }\n sw = max(0, sw-w2);\n while(sh >= h1){\n sw += sh;\n sh -= 1;\n }\n sw = max(0, sw-w1);\n }\n\n while(sh > 0){\n sw += sh;\n sh -= 1;\n }\n\n\n \n\n cout << sw << \"\\n\";\n}" }, { "alpha_fraction": 0.46859902143478394, "alphanum_fraction": 0.49033817648887634, "avg_line_length": 16.25, "blob_id": "83281c8ac343a24aff8401ee6b86a3994f8bdb83", "content_id": "84dda61cefd1403fa9183b8db7d6ed064e416b13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "no_license", "max_line_length": 39, "num_lines": 24, "path": "/252/A.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n, d = list(map(int, input().split()))\nsongs = list(map(int, input().split()))\n\ntime = 0\njokes = 0\novertime = False\nwhile True:\n if len(songs) == 0:\n jokes += (d-time)//5\n break\n s = songs.pop()\n if time + s > d:\n overtime = True\n break\n else:\n time += s\n if len(songs) > 0:\n jokes += 2\n time += 10\n\nif overtime:\n print(-1)\nelse:\n print(jokes)\n" }, { "alpha_fraction": 0.45427727699279785, "alphanum_fraction": 0.4690265357494354, "avg_line_length": 16.894737243652344, "blob_id": "70d45cc66cddab979dfee3918644503a2a9a4f58", "content_id": "7cc183b6592968fb2d800d5055dbacc6468bad8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 339, "license_type": "no_license", "max_line_length": 52, "num_lines": 19, "path": "/problems/1095B.cpp", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "#include <bits/stdc++.h>\nusing namespace std;\n\nint main(){\n int n;\n cin >> n;\n int a[n];\n for(int i = 0; i < n; i++){\n cin >> a[i];\n }\n sort(a, a+n);\n int l = 0;\n int r = n-1;\n\n int linstability = a[r-1] - a[l];\n int rinstability = a[r] - a[l+1];\n\n cout << min(linstability, rinstability) << 
\"\\n\";\n}" }, { "alpha_fraction": 0.47457626461982727, "alphanum_fraction": 0.5461393594741821, "avg_line_length": 25.549999237060547, "blob_id": "0875fbfc0c27158d1e7dbe45234b98994075f2dd", "content_id": "ebadee3046b34c09bf4a96fc1430c9f8e4393804", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 531, "license_type": "no_license", "max_line_length": 49, "num_lines": 20, "path": "/340/C.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n,x1,y1,x2,y2 = map(int,input().split())\nflowers = []\nfor _ in range(n):\n fx,fy = map(int,input().split())\n flowers.append((fx,fy))\ndef distance(x1,y1,x2,y2):\n return ((x1-x2)**2 + (y1-y2)**2)**0.5\ns1 = [distance(fx,fy,x1,y1) for fx,fy in flowers]\ns2 = [distance(fx,fy,x2,y2) for fx,fy in flowers]\n\nmin_area = (max(s2)**2)\n\nfor i in range(n):\n r1 = s1[i]\n r2 = 0\n for j in range(n):\n if s1[j] > r1 and s2[j] > r2:\n r2 = s2[j]\n min_area = min(min_area, (r1**2 + r2**2))\nprint(round(min_area))\n" }, { "alpha_fraction": 0.47826087474823, "alphanum_fraction": 0.5050167441368103, "avg_line_length": 16.58823585510254, "blob_id": "054fe38b294fd970b0ebcc49d19e2179fb192af2", "content_id": "33cc9101541732406e591683a06eb1e0f2093e2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 299, "license_type": "no_license", "max_line_length": 38, "num_lines": 17, "path": "/340/B.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n = int(input())\na = list(map(int,input().split()))\n\nones = []\nfor i in range(n):\n if a[i] == 1:\n ones.append(i)\n\nif len(ones) == 0:\n print(0)\nelif len(ones) == 1:\n print(1)\nelse:\n total = 1\n for i in range(1,len(ones)):\n total *= (ones[i] - ones[i-1])\n print(total)\n" }, { "alpha_fraction": 0.389380544424057, "alphanum_fraction": 0.4011799395084381, "avg_line_length": 15.190476417541504, "blob_id": "3cc34bea12294ccb6ec5cee27082e2c7e7a5a803", "content_id": "9d7e53da0dcef7694f3a13d62d6e5e9e474f09fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 339, "license_type": "no_license", "max_line_length": 40, "num_lines": 21, "path": "/problems/1095A.cpp", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "#include <bits/stdc++.h>\nusing namespace std;\n\nint main(){\n int n;\n cin >> n;\n string s;\n cin >> s;\n int i = 0;\n int p = 0;\n vector<char> ret;\n while(p < n){\n ret.push_back(s[p]);\n i += 1;\n p += i;\n }\n for(int i = 0; i < ret.size(); i++){\n cout << ret[i];\n }\n cout << \"\\n\";\n}" }, { "alpha_fraction": 0.3905579447746277, "alphanum_fraction": 0.45493561029434204, "avg_line_length": 11.263157844543457, "blob_id": "db3919b875883bb28057f85b8c745cc50d85ba58", "content_id": "6ac301c6ae431077cd3971d815d6ccde4516313a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 233, "license_type": "no_license", "max_line_length": 19, "num_lines": 19, "path": "/401/A.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n = int(input())\nball = int(input())\n\nn = n % 6\n\nwhile n > 0:\n\tif n % 2 == 1:\n\t\tif ball == 0:\n\t\t\tball = 1\n\t\telif ball == 1:\n\t\t\tball = 0\n\telif n % 2 == 0:\n\t\tif ball == 1:\n\t\t\tball = 2\n\t\telif ball == 2:\n\t\t\tball = 1\n\tn -= 1\n\nprint(ball)\n" }, { "alpha_fraction": 0.4508886933326721, "alphanum_fraction": 0.47708138823509216, "avg_line_length": 22.77777862548828, "blob_id": 
"c0167da946fc04ddc51081218380e246b7134681", "content_id": "e08f6904c855552ab2ba8a0526b15f592098c55e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1069, "license_type": "no_license", "max_line_length": 46, "num_lines": 45, "path": "/498/F.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n, m, k = list(map(int,input().split()))\nmatrix = []\nfor _ in range(n):\n row = list(map(int, input().split()))\n matrix.append(row)\n# print(matrix)\nd = [[{} for _ in range(m)] for _ in range(n)]\n# print(d)\nmiddle = (n + m - 2) // 2\n# print(middle)\nanswer = 0\n\ndef upleft(x,y,acc,count):\n val = matrix[y][x]^acc\n if count == middle:\n if d[y][x].get(val) == None:\n d[y][x][val] = 1\n return\n else:\n d[y][x][val] += 1\n return\n else:\n if x + 1 < m:\n upleft(x+1,y,val,count+1)\n if y + 1 < n:\n upleft(x,y+1,val,count+1)\n\ndef downright(x,y,acc,count):\n global answer\n if count == n + m - 2 - middle:\n complement = k ^ acc\n if d[y][x].get(complement) != None:\n answer += d[y][x][complement]\n return\n else:\n val = matrix[y][x]^acc\n if x - 1 >= 0:\n downright(x-1,y,val,count+1)\n if y - 1 >= 0:\n downright(x,y-1,val,count+1)\n\nupleft(0,0,0,0)\ndownright(m-1,n-1,0,0)\n# print(d)\nprint(answer)" }, { "alpha_fraction": 0.5505226254463196, "alphanum_fraction": 0.5783972144126892, "avg_line_length": 14.1578950881958, "blob_id": "37022efc9cee7f92b1966f5615259f3b5bcf845f", "content_id": "61269c9ceebfd587816885b419e4e1460ed64502", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 287, "license_type": "no_license", "max_line_length": 37, "num_lines": 19, "path": "/259/B.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n = int(input())\nnums = list(map(int,input().split()))\n\nfirst = 0 \nfor i in range(0,n-1):\n\tif nums[i] > nums[i+1]:\n\t\tfirst = i + 1\n\nnums = nums[first:] + nums[:first]\n\nflag = True\nfor i in range(n-1):\n\tif nums[i] > nums[i+1]:\n\t\tflag = False\n\nif flag:\n\tprint((n-first)%n)\nelse:\n\tprint(-1)" }, { "alpha_fraction": 0.5057803392410278, "alphanum_fraction": 0.52601158618927, "avg_line_length": 16.350000381469727, "blob_id": "0766d384c53a0598789336d1557dea3eabf28ce9", "content_id": "2d3e462cbe96adcbc2aeb81a59240b5dc0b9f64f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 346, "license_type": "no_license", "max_line_length": 40, "num_lines": 20, "path": "/498/B.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n, k = [int(x) for x in input().split()]\na = [int(x) for x in input().split()]\n\ntemp = sorted(a, reverse = True)\ng = []\nfor i in range(k):\n\tg.append(temp[i])\nprint(sum(g))\ncurr = 0\ni = 0\nwhile i < len(a):\n\tcurr += 1\n\tif a[i] in g: \n\t\tg.remove(a[i])\n\t\tif len(g) == 0:\n\t\t\tprint(curr+len(a)-i-1)\n\t\telse:\n\t\t\tprint(curr, end = \" \")\n\t\tcurr = 0 \n\ti += 1" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.5037037134170532, "avg_line_length": 18.428571701049805, "blob_id": "856000ee988f3dea685354625eea16b271424ae5", "content_id": "aebbbcfd42055f77c3b2c8c7504fa12db030cfb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 37, "num_lines": 7, "path": "/259/C.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "m, n = list(map(int,input().split()))\n\nexp = 
0\nfor k in range(1, m+1):\n\tprob = ((k/m)**n) *(1-((k-1)/k)**n)\n\texp += prob * k\nprint(exp)" }, { "alpha_fraction": 0.5317919254302979, "alphanum_fraction": 0.5549132823944092, "avg_line_length": 14.772727012634277, "blob_id": "bb9d08d745058b808fbb11a5cdbfd23abe00b560", "content_id": "f64a6398e88ff74d6cd2a0039bc83d6240cc4c11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 346, "license_type": "no_license", "max_line_length": 37, "num_lines": 22, "path": "/498/C.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n = int(input())\na = [int(x) for x in input().split()]\n\nlow = 0 \nhigh = len(a)-1\ncurr_max = 0 \nlsum, rsum = a[low], a[high]\nif lsum == rsum:\n\tcurr_max = lsum\nwhile high - low > 1:\n\tif lsum < rsum:\n\t\tlow += 1\n\t\tlsum += a[low]\n\telse:\n\t\thigh -= 1\n\t\trsum += a[high]\n\tif lsum == rsum:\n\t\tcurr_max = lsum\nif len(a) <= 1:\n\tprint(0)\nelse:\n\tprint(curr_max)" }, { "alpha_fraction": 0.4563573896884918, "alphanum_fraction": 0.48591065406799316, "avg_line_length": 25.454545974731445, "blob_id": "39ca2cbb8958de37d398ce26018615eaa0ce3d53", "content_id": "9a39b7d47efe74eef1f7e34eb2c15659b053fd9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1455, "license_type": "no_license", "max_line_length": 67, "num_lines": 55, "path": "/499/F.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "import sys\nsys.setrecursionlimit(1000000)\n\nn = int(input())\nop = [\"\"] * n\ninflag = [True] * n\nargs = [list() for _ in range(n)]\nfor i in range(n):\n inp = list(input().split())\n # print(inp)\n if inp[0] == \"AND\" or inp[0] == \"XOR\" or inp[0] == \"OR\":\n op[i] = inp[0]\n inflag[i] = False\n args[i].append(int(inp[1])-1)\n args[i].append(int(inp[2])-1)\n elif inp[0] == \"NOT\":\n op[i] = inp[0]\n inflag[i] = False\n args[i].append(int(inp[1])-1)\n elif inp[0] == \"IN\":\n op[i] = inp[0]\n inflag[i] = True\n args[i].append(int(inp[1]))\n\ndef flip(n):\n if n == 0:\n return 1\n if n == 1:\n return 0\n#recursive implementation, probably won't work because of python :/\ndef dfs(root):\n if op[root] == \"IN\":\n return args[root][0]\n elif op[root] == \"NOT\":\n return flip(dfs(args[root][0]))\n elif op[root] == \"AND\":\n return dfs(args[root][0]) & dfs(args[root][1])\n elif op[root] == \"OR\":\n return dfs(args[root][0]) | dfs(args[root][1])\n elif op[root] == \"XOR\":\n return dfs(args[root][0]) ^ dfs(args[root][1])\n\nans = []\ncurr = -1\nfor i in range(n):\n if inflag[i]:\n if curr == -1:\n curr = i\n args[i][0] = flip(args[i][0])\n else:\n args[curr][0] = flip(args[curr][0])\n args[i][0] = flip(args[i][0])\n curr = i\n ans.append(str(dfs(0)))\nprint(''.join(ans))\n" }, { "alpha_fraction": 0.4722222089767456, "alphanum_fraction": 0.5277777910232544, "avg_line_length": 17, "blob_id": "7a0d35c149d9897da384c834721dc9d113c75bdf", "content_id": "1174ed2d98e9b09722a55cf8d3e4d3bdab61c578", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "no_license", "max_line_length": 29, "num_lines": 2, "path": "/498/test.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n= 10\nprint([[] for _ in range(n)])\n" }, { "alpha_fraction": 0.42009133100509644, "alphanum_fraction": 0.46423134207725525, "avg_line_length": 19.53125, "blob_id": "1a8f94058354197a4c8d8510e39100d8c11cf213", "content_id": "d64fc1b57160cda3ff0c7baaac9abeb386bc5006", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 657, "license_type": "no_license", "max_line_length": 47, "num_lines": 32, "path": "/401/D.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n = int(input())\nss = [input().strip() for _ in range(n)]\n\n# def compare_and_truncate(s1, s2):\n# if s1 == s2:\n# return s1\n# min_l = min(len(s1), len(s2))\n# for i in range(min_l):\n# if ord(s1[i]) < ord(s2[i]):\n# return s1\n# elif ord(s1[i]) > ord(s2[i]):\n# return s1[:i]\n# return s1[:min_l]\n\ndef compare_and_truncate(s1,s2):\n if s1 > s2:\n i = 0\n while i < len(s2) and s1[i] == s2[i]:\n i += 1\n return s1[:i]\n return s1\n\n\n\nfor i in range(len(ss)-2,-1,-1):\n ss[i] = compare_and_truncate(ss[i],ss[i+1])\n\nans = \"\"\nfor s in ss:\n ans += s + \"\\n\"\n\nprint(ans)\n" }, { "alpha_fraction": 0.5037878751754761, "alphanum_fraction": 0.5549242496490479, "avg_line_length": 12.894737243652344, "blob_id": "e958ddec27a777ff2e48060a4e3389d1f2efd5fb", "content_id": "8869282599e31971747f560a1c3a77f9a9d1aa52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 528, "license_type": "no_license", "max_line_length": 31, "num_lines": 38, "path": "/401/B.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n = int(input())\ns1 = input()\ns2 = input()\n\nd = {}\nfor i in range(10):\n\td[i] = 0\nfor c in s2:\n\td[int(c)] += 1\n\n# min flicks\nflicks_saved = 0\nfor c in s1:\n\tdigit = int(c)\n\tfor i in range(digit,10):\n\t\tif d[i] > 0:\n\t\t\tflicks_saved += 1\n\t\t\td[i] -= 1\n\t\t\tbreak\nprint(n-flicks_saved)\n\n\nd = {}\nfor i in range(10):\n\td[i] = 0\nfor c in s2:\n\td[int(c)] += 1\n\n# max flicks \nflicks_inflicted = 0\nfor c in s1:\n\tdigit = int(c)\n\tfor i in range(digit + 1, 10):\n\t\tif d[i] > 0:\n\t\t\tflicks_inflicted += 1\n\t\t\td[i] -= 1\n\t\t\tbreak\nprint(flicks_inflicted)\n" }, { "alpha_fraction": 0.5228758454322815, "alphanum_fraction": 0.5490196347236633, "avg_line_length": 12.939393997192383, "blob_id": "fcc93d8fca92c745af1be6b7048e29c176371cbe", "content_id": "ce066ad516474f9a8dcd5c6e81685777ed3d3f90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 459, "license_type": "no_license", "max_line_length": 34, "num_lines": 33, "path": "/499/B.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n, m = map(int,input().split())\nv = list(map(int,input().split()))\n\nd = {}\nfor i in range(1,101):\n\td[i] = 0\nfor i in v:\n\td[i] += 1\n# print(d)\n\nif n > m:\n\tprint(0)\nelse:\n\tpossible = m // n\n\tflag = False\n\tret = 0\n\twhile possible > 0:\n\t\tcount = 0\n\t\tfor value in d.values():\n\t\t\tcount += value//possible\n\t\t\tif count >= n:\n\t\t\t\tflag = True\n\t\t\t\tret = possible\n\t\t\tif flag:\n\t\t\t\tbreak\n\t\tif flag:\n\t\t\tbreak\n\t\telse:\n\t\t\tpossible -= 1\n\tif flag:\n\t\tprint(ret)\n\telse:\n\t\tprint(0)" }, { "alpha_fraction": 0.46846845746040344, "alphanum_fraction": 0.5015015006065369, "avg_line_length": 12.666666984558105, "blob_id": "f6f74cf8adf9c7873e8a93b434797c970fbdb8fa", "content_id": "1d9929f021f677a1338b28698337e28272ef7de9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 333, "license_type": "no_license", "max_line_length": 34, "num_lines": 24, "path": "/499/A.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n, k = map(int,input().split())\ns = 
input()\n\ns = ''.join(sorted(s))\n\ndef cost(c):\n\treturn ord(c)-ord(\"a\")+1\n# print(cost(\"a\"))\ntotal = cost(s[0])\nk -= 1\ncurr = 0\ni = 1\nwhile i < n:\n\tif k == 0:\n\t\tbreak\n\tif cost(s[i])-cost(s[curr]) >= 2:\n\t\ttotal += cost(s[i])\n\t\tcurr = i \n\t\tk -= 1\n\ti += 1\nif k > 0:\n\tprint(-1)\nelse:\n\tprint(total)\n\n\n\n\n\n" }, { "alpha_fraction": 0.3741496503353119, "alphanum_fraction": 0.39115646481513977, "avg_line_length": 15.38888931274414, "blob_id": "3c83ad779450ccf176dfa5b930ca5e6ac21240d0", "content_id": "1e696c5a50605711c839694eceda94985c950341", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 294, "license_type": "no_license", "max_line_length": 32, "num_lines": 18, "path": "/problems/1091A.cpp", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "#include <bits/stdc++.h>\nusing namespace std;\n\nint main(){\n int y,b,r;\n cin >> y >> b >> r;\n int yy = 1;\n int bb = 2;\n int rr = 3;\n int total = 6;\n while(yy<y && bb<b && rr<r){\n yy++;\n bb++;\n rr++;\n total += 3;\n }\n cout << total << \"\\n\";\n}" }, { "alpha_fraction": 0.5545722842216492, "alphanum_fraction": 0.5766961574554443, "avg_line_length": 17.324323654174805, "blob_id": "897baa9474550b475a6a5ef8b102bdf559b9e91b", "content_id": "8f7027c5c9cc3c786f3b2cc643f9706be1f5fe3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 678, "license_type": "no_license", "max_line_length": 46, "num_lines": 37, "path": "/498/E.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n, q = map(int,input().split())\nofficers = map(int,input().split())\n\ns = [[] for _ in range(n)]\nc = [1] * n\nb = [0] * n\n\nfor i,v in enumerate(officers):\n\ts[v-1].append(i+1)\n\ntraversal = []\ndef dfs():\n\tstack = [0]\n\twhile stack:\n\t\tn = stack.pop()\n\t\ttraversal.append(n)\n\t\tfor child in reversed(s[n]):\n\t\t\tstack.append(child)\ndfs()\n\nfor i in range(n):\n\tb[traversal[i]] = i\n\nfor i in range(n-1,-1,-1):\n\tfor child in s[i]:\n\t\tc[i] += c[child]\n# print(c)\nanswer = []\nfor _ in range(q):\n\tu, k = map(int,input().split())\n\tstart = b[u-1]\n\tif k <= c[u-1]:\n\t\tanswer.append((str(traversal[start+k-1]+1)))\n\t\t# print(traversal[start+k-1]+1)\n\telse:\n\t\tanswer.append(\"-1\")\nprint('\\n'.join(answer))\n" }, { "alpha_fraction": 0.4220779240131378, "alphanum_fraction": 0.47186148166656494, "avg_line_length": 14.433333396911621, "blob_id": "5de73b8827366cb6230fb480bfa2ee5b6a92455c", "content_id": "ad567fb2cb8d7ac838c85617fcbbbb2d511d8054", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 462, "license_type": "no_license", "max_line_length": 34, "num_lines": 30, "path": "/499/C.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n = int(input())\nm = int(input())\na = list(map(int,input().split()))\nb = list(map(int,input().split()))\nstart = m\n\nif b[0]-1 <= 0:\n\tprint(-1)\nelse:\n\tm += m/(b[0]-1)\n\tflag = True\n\tfor i in range(n-1,0,-1):\n\t\tif a[i] - 1 <= 0:\n\t\t\tflag = False\n\t\t\tprint(-1)\n\t\t\tbreak\n\t\telse:\n\t\t\tm += m/(a[i]-1)\n\t\tif b[i] - 1 <= 0:\n\t\t\tflag = False\n\t\t\tprint(-1)\n\t\t\tbreak\n\t\telse:\n\t\t\tm += m/(b[i]-1)\n\tif flag:\n\t\tif a[0] - 1 <= 0:\n\t\t\tprint(-1)\n\t\telse:\n\t\t\tm += m/(a[0]-1)\n\t\t\tprint(m-start)" }, { "alpha_fraction": 0.4761904776096344, "alphanum_fraction": 0.5023809671401978, "avg_line_length": 16.45833396911621, "blob_id": 
"c2416ee28f405247e63a075ece1f203c23508134", "content_id": "fb5ced6c73943e2ce5536fbc5310ef3afe87d01c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 420, "license_type": "no_license", "max_line_length": 33, "num_lines": 24, "path": "/educational/1/a.cpp", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\nusing namespace std;\n\nint main() {\n\tvector<long long> v;\n\tlong long two_power;\n\tfor(int i = 0; i <= 32; ++i){\n\t\ttwo_power = (long long)1<<i;\n\t\tv.push_back(two_power);\n\t}\t\n\tint t;\n\tlong long n;\n\tlong long sum;\n\tcin >> t;\n\tfor(int i = 0; i < t; ++i){\n\t\tcin >> n;\n\t\tsum = n*(n+1)/2;\n\t\tfor(int j = 0; j <= 32; j++){\n\t\t\tif(v[j] <= n) sum -= 2 * v[j];\n\t\t}\t\t\n\t\tcout << sum << \"\\n\";\n\t}\t\n}\n\n" }, { "alpha_fraction": 0.4234234094619751, "alphanum_fraction": 0.46846845746040344, "avg_line_length": 9.090909004211426, "blob_id": "95a2fb7f3fe638651ac7f3f05e620dda313243cc", "content_id": "ad03a6098074665e13da7f73bc204124f4c3739a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "no_license", "max_line_length": 18, "num_lines": 11, "path": "/493/D.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "# n = int(input())\n\ntotal = 0\ngap = 0\ni = 0\nwhile i<=2:\n i += 1\n gap += i\n total += gap\n\nprint(total)\n" }, { "alpha_fraction": 0.47975078225135803, "alphanum_fraction": 0.5373831987380981, "avg_line_length": 19.70967674255371, "blob_id": "dc703dd5ac5d52c821cf4e954a7f48a3b0228f45", "content_id": "919f1cabf58797ae53f69aac849d8b6580199203", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 642, "license_type": "no_license", "max_line_length": 43, "num_lines": 31, "path": "/493/C.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n,x,y = map(int,input().split())\ns = input()\n\n# x = cost of reverse\n# y = cost of invert\n\nchains = 0\n\nzero = False\nfor i in range(n):\n if not zero and s[i] == \"0\":\n zero = True\n elif not zero and s[i] == \"1\":\n continue\n elif zero and s[i] == \"0\":\n continue\n elif zero and s[i] == \"1\":\n chains += 1\n zero = False\nif zero:\n chains += 1\n\nif chains == 0:\n print(0)\nelse:\n min_cost = 99999999999999999999999999\n for n_invert in range(1,chains+1):\n n_reverse = chains - n_invert\n cost = n_reverse * x + n_invert * y\n min_cost = min(min_cost, cost)\n print(min_cost)\n" }, { "alpha_fraction": 0.543749988079071, "alphanum_fraction": 0.5625, "avg_line_length": 22, "blob_id": "e051fa3d50ba398901cf96be1d3185cd2f2a619d", "content_id": "98e1e7422492ecb26a75f3b5e08123d065ff575a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160, "license_type": "no_license", "max_line_length": 39, "num_lines": 7, "path": "/498/A.py", "repo_name": "kl738/codeforces", "src_encoding": "UTF-8", "text": "n = int(input())\nlst = [int(x) for x in input().split()]\nfor i, v in enumerate(lst):\n\tif v%2==0:\n\t\tlst[i] = v-1\nlst = [str(x) for x in lst]\nprint(' '.join(lst))" } ]
40
relax-man/Djnago-Base
https://github.com/relax-man/Djnago-Base
938f1dd90cf9894e646181e4d3611c7111ae916c
5fa0a79cd5371abc86cc168de90fe9f7654c2ffb
b09f18badb922164e3641f7d6b34bdda33f48069
refs/heads/master
2023-03-21T15:38:40.198017
2021-03-04T06:37:24
2021-03-04T06:43:33
344,096,138
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5024154782295227, "alphanum_fraction": 0.695652186870575, "avg_line_length": 16.25, "blob_id": "0eb0bcbcc5dc4ed31fb86d1d0df1b2c465e89cbb", "content_id": "a5e7904634dec436881ed6e9ab99cd0aa34b2fa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 207, "license_type": "no_license", "max_line_length": 22, "num_lines": 12, "path": "/requirements.txt", "repo_name": "relax-man/Djnago-Base", "src_encoding": "UTF-8", "text": "asgiref==3.3.1\ncolorama==0.4.4\ncolorlog==4.7.2\ndj-database-url==0.5.0\nDjango==3.1.7\ndjango-cleanup==5.1.0\ndjango-heroku==0.3.1\ngunicorn==20.0.4\npsycopg2==2.8.6\npytz==2021.1\nsqlparse==0.4.1\nwhitenoise==5.2.0\n" }, { "alpha_fraction": 0.4553283154964447, "alphanum_fraction": 0.4650161564350128, "avg_line_length": 20.604650497436523, "blob_id": "b6e77cba686eb09ab722b2ddf71bc0bc70513f40", "content_id": "3668aacab18c7abed5346ae626cd88d6c13744a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 929, "license_type": "no_license", "max_line_length": 62, "num_lines": 43, "path": "/src/settings/local.py", "repo_name": "relax-man/Djnago-Base", "src_encoding": "UTF-8", "text": "from settings.base import *\n\nSECRET_KEY = None\nDEBUG = True\n\nALLOWED_HOSTS = ['localhost', '127.0.0.1']\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': BASE_DIR / 'db.sqlite3',\n }\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'special': {\n '()': 'colorlog.ColoredFormatter',\n 'format': '%(log_color)s[%(asctime)s] %(message)s'\n }\n },\n 'filters': {\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'filters': ['require_debug_true'],\n 'class': 'logging.StreamHandler',\n 'formatter': 'special'\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'propagate': True,\n }\n }\n}\n" }, { "alpha_fraction": 0.6024691462516785, "alphanum_fraction": 0.6024691462516785, "avg_line_length": 15.916666984558105, "blob_id": "b922e7c4622701be7241dbb8733e2bee8f66ffdd", "content_id": "d5f361222c3fb69ebb64e20bac167e0a80ea1d57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 405, "license_type": "no_license", "max_line_length": 66, "num_lines": 24, "path": "/README.md", "repo_name": "relax-man/Djnago-Base", "src_encoding": "UTF-8", "text": "## Base Django-Heroku project\n\n**TODO:**\n\n- Generate secret_key\n ```\n from django.core.management.utils import get_random_secret_key\n print(get_random_secret_key())\n ```\n\n- Create virtual env\n ```\n python -m venv env\n pip install -r requirements.txt\n ```\n\n- Remove comments in .gitignore\n ```\n # VsCode\n .vscode/\n \n # Local settings\n src/settings/local.py\n ```" }, { "alpha_fraction": 0.7278688549995422, "alphanum_fraction": 0.7278688549995422, "avg_line_length": 18.0625, "blob_id": "5eaa857752caa63178cb90fae848857ac345dfce", "content_id": "cc4eb6911988ee5eb6c232165edc63fa4514ad37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 305, "license_type": "no_license", "max_line_length": 77, "num_lines": 16, "path": "/src/settings/heroku.py", "repo_name": "relax-man/Djnago-Base", "src_encoding": "UTF-8", "text": "from settings.base import *\n\nimport dj_database_url\nimport django_heroku\nimport os\n\nSECRET_KEY = 
os.environ.get('SECRET_KEY')\nDEBUG = False\n\nALLOWED_HOSTS = ['.herokuapp.com']\n\nDATABASES = {\n 'default': dj_database_url.config(default=os.environ.get('DATABASE_URL'))\n}\n\ndjango_heroku.settings(locals())\n" } ]
4
cFireworks/kmeans
https://github.com/cFireworks/kmeans
5081de323176226bdf0c4b86442bb20e1385d8df
94316ee85e343b894a15fcb3399d5c9b14fb1342
6d13853a8171ebbeda908705e4128b5bd3ed43f7
refs/heads/master
2021-06-16T09:44:40.976710
2021-04-21T11:12:28
2021-04-21T11:12:28
191,701,180
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5732483863830566, "alphanum_fraction": 0.5801486372947693, "avg_line_length": 29.634145736694336, "blob_id": "6b42b24c537eb07eda8b8f6e55233506ab4ca98d", "content_id": "da974b757fbe743de427a57eb1fd9ccb87e9e985", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3964, "license_type": "no_license", "max_line_length": 84, "num_lines": 123, "path": "/k_means.py", "repo_name": "cFireworks/kmeans", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\ndef centroids_init(X, n_clusters, mode='random'):\n \"\"\"\n 初始化中心点\n \"\"\"\n n_samples, n_features = X.shape\n centroids = np.empty((n_clusters, n_features), dtype=X.dtype)\n if mode == 'random':\n random_state = np.random.mtrand._rand\n seeds = random_state.permutation(n_samples)[:n_clusters]\n centroids = X[seeds]\n elif mode == 'kmeans++':\n # n_local_trials = 2 + int(np.log(n_clusters))\n random_state = np.random.mtrand._rand\n\n # select the first center randomly\n index_0 = np.random.randint(0, n_samples)\n centroids[0] = X[index_0]\n\n for i in range(1, n_clusters):\n # compute the distances to known-centers\n dist = compute_dist(X, centroids[:i])\n min_dist = dist.min(axis=1)\n prob = min_dist / min_dist.sum()\n # 依概率随机选取下一个中心点\n index = np.random.choice(np.arange(len(prob)), p=prob.ravel())\n centroids[i] = np.copy(X[index])\n return centroids-+-\n\n\ndef compute_dist(X, Y):\n \"\"\"\n 使用矩阵乘法的方法,计算样本点与中心点距离平方\n \"\"\"\n XX = np.sum(X*X, axis=1)[:, np.newaxis]\n YY = np.sum(Y*Y, axis=1)\n XY = np.dot(X, Y.T)\n return np.maximum(XX + YY - 2 * XY, 0)\n\n\ndef update_centers(X, n_clusters, labels, distances):\n \"\"\"\n 更新中心点,解决中心点偏离的问题\n \"\"\"\n n_features = X.shape[1]\n\n num_in_cluster = np.zeros((n_clusters,))\n centers = np.zeros((n_clusters, n_features))\n\n # 寻找空类\n for i in range(n_clusters):\n num_in_cluster[i] = (labels == i).sum()\n empty_clusters = np.where(num_in_cluster == 0)[0]\n\n if len(empty_clusters):\n far_from_centers = distances.argsort()[::-1]\n\n for i, cluster_id in enumerate(empty_clusters):\n far_index = far_from_centers[i]\n centers[cluster_id] = X[far_index]\n num_in_cluster[cluster_id] = 1\n\n for i in range(n_clusters):\n centers[i] += X[labels == i].sum(axis=0)\n\n centers /= num_in_cluster[:, np.newaxis]\n\n return centers\n\n\ndef k_init(X, ):\n return\n\n\ndef k_means(X, n_clusters, max_iter, init_mode='kmeans++', verbose=False, tol=1e-4):\n best_labels, best_inertia, best_centers = None, None, None\n # init\n n_samples = X.shape[0]\n centers = centroids_init(X, n_clusters, init_mode)\n\n # Allocate memory to store the distances for each sample to its\n # closer center for reallocation in case of ties\n distances = np.zeros(shape=(n_samples,), dtype=X.dtype)\n\n # iterations\n for i in range(max_iter):\n Y = centers.copy()\n # 计算样本点到中心点的欧式距离\n dist = compute_dist(X, Y)\n # 记录样本点距离最近的中心点序号\n labels = dist.argmin(axis=1)\n\n distances = dist[np.arange(dist.shape[0]), labels]\n inertia = distances.sum()\n\n # 计算新的中心点\n centers = update_centers(X, n_clusters, labels, distances)\n\n if best_inertia is None or inertia < best_inertia:\n best_labels = labels.copy()\n best_centers = centers.copy()\n best_inertia = inertia\n\n d_center = np.ravel(Y - centers, order='K')\n center_shift_total = np.dot(d_center, d_center)\n if center_shift_total <= tol:\n if verbose:\n print(\"Converged at iteration %d: \"\n \"center shift %e within tolerance %e\"\n % (i, center_shift_total, tol))\n break\n\n if center_shift_total > 0:\n # rerun E-step 
in case of non-convergence so that predicted labels\n # match cluster centers\n dist = compute_dist(X, best_centers)\n best_labels = dist.argmin(axis=1)\n distances = dist[np.arange(dist.shape[0]), best_labels]\n best_inertia = distances.sum()\n\n return best_labels, best_inertia, best_centers, i + 1\n" }, { "alpha_fraction": 0.5850684642791748, "alphanum_fraction": 0.5974488854408264, "avg_line_length": 32.318748474121094, "blob_id": "329822d844fe20c3bff73d417ff6594b43eb60fb", "content_id": "76781fb97c2df2644532ee7ea22992f0986f611d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5331, "license_type": "no_license", "max_line_length": 79, "num_lines": 160, "path": "/main.py", "repo_name": "cFireworks/kmeans", "src_encoding": "UTF-8", "text": "from sklearn.cluster import KMeans\nfrom k_means import k_means\n\nimport numpy as np\nfrom keras.datasets import mnist\nimport time\n\nfrom eval import ClusterEval\n\n# load data\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n# data dimension\nraw_dim = 28 * 28 # raw dimension\nlow_dim = 200 # random projection to low-dimension\n\n# random_projection matrix\nrj_matrix = 1.0 - 2.0 * (np.random.rand(raw_dim, low_dim) > 0.5)\nrj_matrix = rj_matrix / np.sqrt(low_dim)\nprint(np.sum(rj_matrix), np.max(rj_matrix), np.min(rj_matrix))\n\n# choose data\ntrain_num = 20000\ndata = train_images[0: train_num].reshape(\n (train_num, raw_dim)) / 255. # X\nlabels = train_labels[0: train_num] # y\n\n\ndef cluster_sklearn_kmeans(data, n_cluster=10):\n # using kmeans on raw data\n # @return cluster labels\n print(\"Begin sklearn clustering on raw data...\")\n print(\"Data shape = \", data.shape)\n start = time.time()\n kmeans = KMeans(n_clusters=n_cluster)\n kmeans.fit(data)\n end = time.time()\n print(\"Clustering on raw data, using time = \", end - start)\n return kmeans.labels_\n\n\ndef my_cluster_my_kmeans(data, n_cluster=10):\n # using kmeans on raw data\n # @return cluster labels\n print(\"Begin my clustering on raw data...\")\n print(\"Data shape = \", data.shape)\n start = time.time()\n labels, _, _, _ = k_means(data, n_clusters=n_cluster, max_iter=300)\n end = time.time()\n print(\"Clustering on raw data, using time = \", end - start)\n return labels\n\n\ncluster_fn = [cluster_sklearn_kmeans, my_cluster_my_kmeans]\n\n\ndef cluster_on_rj_data(data, dim=100, function_name=my_cluster_my_kmeans):\n # using random projection to reduce the dimension of raw data, then cluster\n # @return cluster labels\n print(\"Begin clustering on low-dimension data...\")\n print(\"Data shape = \", data.shape)\n\n print(\"First random projection...\")\n start = time.time()\n rj_data = np.dot(data, rj_matrix)\n end = time.time()\n print(\"Random projection time = \", end - start)\n\n print(\"Second kmeans...\")\n labels = function_name(rj_data, n_cluster=10)\n return labels\n\n\ndef cluster_on_rs_data(data, p=0.01, function_name=my_cluster_my_kmeans):\n # using random sparsification to sparse raw data, then cluster\n # @return cluster labels\n print(\"Begin clustering on sparsed data...\")\n print(\"Data shape = \", data.shape)\n\n print(\"First random projection...\")\n start = time.time()\n rj_data = np.dot(data, rj_matrix)\n end = time.time()\n print(\"Random projection time = \", end - start)\n\n print(\"Second random sparsification...\")\n start = time.time()\n # construct random sparsification matrix\n n = rj_data.shape[0] # the number of data points\n max_v = np.max(np.abs(rj_data)) # 
max value\n    tau = p * ((rj_data / max_v) ** 2)  # tau_ij\n\n    # sparsification probability\n    prob = np.zeros_like(tau, dtype=np.float32)\n    sqrt_tau = 64. * np.sqrt(tau / n) * np.log(n) * np.log(n)\n\n    prob[tau > sqrt_tau] = tau[tau > sqrt_tau]\n    prob[tau <= sqrt_tau] = sqrt_tau[tau <= sqrt_tau]\n\n    sparse_map = np.random.rand(rj_data.shape[0], rj_data.shape[1]) <= prob\n\n    # sparsification\n    rs_data = rj_data.copy()\n    index = (prob != 0.0) & (sparse_map == 1.0)\n    rs_data[index] = rs_data[index] / \\\n        prob[index]  # data[i][j]/prob[i][j]\n    rs_data[sparse_map == 0.0] = 0.0  # data[i][j] = 0.0\n\n    end = time.time()\n    print(\"Random projection time = \", end - start)\n\n    print(\"Before sparsification, the number of zero-elements is:\",\n          np.sum(rj_data == 0.0)/(rj_data.shape[0] * rj_data.shape[1]))\n    print(\"After sparsification, the number of zero-elements is:\",\n          np.sum(rs_data == 0.0)/(rs_data.shape[0] * rs_data.shape[1]))\n\n    print(\"Second kmeans...\")\n    labels = function_name(rs_data, n_cluster=10)\n    return labels\n\n\ndef analysis_and_plot(data, clu_labels, labels=None):\n    # analyse the cluster result, CP, SP, RI, ARI, FusionMatrix\n    # @params data : numpy.array\n    # @params clu_labels : clustered labels\n    # @params labels : real labels\n\n    evaler = ClusterEval(data, clu_labels, labels)\n    print(\"CP = \", evaler.CP)\n    print(\"SP = \", evaler.SP)\n    if isinstance(labels, np.ndarray):\n        print(\"RI = \", evaler.RI)\n        print(\"ARI = \", evaler.ARI)\n    '''\n    print(\"Confusion matrix:\")\n    for row in evaler.norm_labels_grid:\n        print(list(row))\n    plt.figure()\n    plt.imshow(evaler.norm_labels_grid)\n    plt.show()\n    '''\n\n# print(\"###################################\")\n# print(\"Cluster on raw data and evaluate...\")\n# clu_labels = cluster_on_raw_data(data)\n# analysis_and_plot(data, clu_labels, labels)\n# print(\"###################################\")\n\n# print(\"###################################\")\n# print(\"my Cluster on raw data and evaluate...\")\n# clu_labels = my_cluster_on_raw_data(data)\n# analysis_and_plot(data, clu_labels, labels)\n# print(\"###################################\")\n\n\nprint(\"###################################\")\nprint(\"Cluster on random sparsification data and evaluate...\")\nclu_labels = cluster_on_rs_data(data)\nanalysis_and_plot(data, clu_labels, labels)\nprint(\"###################################\")\n" }, { "alpha_fraction": 0.8260869383811951, "alphanum_fraction": 0.8260869383811951, "avg_line_length": 11.5, "blob_id": "35bde2c2c03c2e49df07c480bebf0e1019986507", "content_id": "96adc8bb40333593655199961eca9d972f046c7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 37, "license_type": "no_license", "max_line_length": 13, "num_lines": 2, "path": "/README.md", "repo_name": "cFireworks/kmeans", "src_encoding": "UTF-8", "text": "# kmeans\nAn implementation of the k-means algorithm\n" }, { "alpha_fraction": 0.532677173614502, "alphanum_fraction": 0.5415354371070862, "avg_line_length": 38.06922912597656, "blob_id": "2b3a40889951c0c6d6e07ddea2a99dd6bf2d680e", "content_id": "f08d7371695d2b2bb9b7b1a920e8399f234a3e17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5080, "license_type": "no_license", "max_line_length": 114, "num_lines": 130, "path": "/eval.py", "repo_name": "cFireworks/kmeans", "src_encoding": "UTF-8", "text": "# -*- coding : utf-8 -*-\n\n### some methods to evaluate kmeans clustering results\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\nclass 
ClusterEval():\n    def __init__(self, data, clu_labels, labels = None):\n        ### init function\n        ### @params data : numpy.array source data\n        ### @params clu_labels : numpy.array cluster labels\n        ### @params labels : source labels, if no labels available, None\n        self.data = data\n        self.clu_labels = clu_labels\n        self.labels = labels\n\n        self.n_data = data.shape[0] # data number\n        self.n_clusters = len(np.unique(clu_labels)) # the number of clusters\n        if isinstance(labels, np.ndarray):\n            self.n_classes = len(np.unique(labels)) # real class number\n\n        self.centers = self.calc_centers() # find centers\n\n        self.CP = self.compactness()\n        self.SP = self.separation()\n\n        self.labels_grid = self.calc_labels_grid() # labels fusion matrix, None if no labels\n        if self.labels_grid is not None:\n            self.norm_labels_grid = self.labels_grid / np.sum(self.labels_grid, axis = 1).reshape(-1, 1) # normalize\n        else:\n            self.norm_labels_grid = None\n\n        self.RI = self.rand_index()\n        self.ARI = self.adjust_rand_index()\n\n    def calc_centers(self):\n        ### calculate centers using clu_labels and data\n        ### @return numpy.array\n        centers = []\n        for k in range(self.n_clusters):\n            centers.append(np.mean(self.data[self.clu_labels == k, :], axis = 0))\n        centers = np.array(centers)\n        print(centers.shape)\n        return centers\n\n    def compactness(self):\n        ### compute the target function, eval inner-cluster distance\n        ### the lower the better\n        CP = 0.0\n        for k in range(self.n_clusters):\n            indexes = np.array(range(self.n_data))[self.clu_labels == k]\n            clu_data = self.data[indexes, :]\n            center = self.centers[k]\n            CP += np.mean(np.sum(np.square(clu_data - center.reshape(1, -1)), axis = 1))\n        return CP\n\n    def separation(self):\n        ### compute the between-cluster distance\n        ### the higher the better\n        SP = 0.0\n        for k in range(self.n_clusters):\n            dis2 = np.sum(np.square(self.centers - self.centers[k].reshape(1, -1)), axis = 1)\n            SP += np.sum(np.sqrt(dis2))\n        SP = 2 * SP / (self.n_clusters * (self.n_clusters - 1))\n        return SP\n\n    def calc_labels_grid(self):\n        ### labels available, compute labels fusion matrix\n        ### row-axis is cluster labels, col-axis is real labels\n        if not isinstance(self.labels, np.ndarray):\n            return None\n        grid = np.zeros((self.n_clusters, self.n_classes))\n        for k in range(self.n_clusters):\n            indexes = np.array(range(self.n_data))[self.clu_labels == k]\n            real_labels = self.labels[indexes]\n            for j in range(self.n_classes):\n                grid[k][j] = np.sum(real_labels == j)\n        return grid\n\n\n    def rand_index(self):\n        ### labels available, rand index\n        ### the higher the better\n        if not isinstance(self.labels, np.ndarray):\n            return None\n        # brute force, for every pair\n        #tp = 0 # true positive, same class clustered in the same cluster\n        #tn = 0 # true negative, different class clustered in different clusters\n        #for i in range(self.n_data):\n        #    for j in range(self.n_data):\n        #        if self.labels[i] == self.labels[j] and self.clu_labels[i] == self.clu_labels[j]:\n        #            tp += 1\n        #        if self.labels[i] != self.labels[j] and self.clu_labels[i] != self.clu_labels[j]:\n        #            tn += 1\n        #RI = 2.0 * (tp + tn)/(self.n_data * (self.n_data - 1))\n\n        RI = 0.0\n        for i in range(self.n_clusters):\n            for j in range(self.n_classes):\n                a = self.labels_grid[i][j]\n                RI += a * (a - 1) / 2\n        RI = RI / (self.n_data * (self.n_data - 1))\n        return RI\n\n    def adjust_rand_index(self):\n        ### labels available, adjust rand index\n        ### ARI = (RI - E[RI]) / (MaxRI - E[RI])\n        ### the higher the better\n        if not isinstance(self.labels, np.ndarray):\n            return None\n        sum_labels = np.sum(self.labels_grid, axis = 0) # sum by col\n        sum_clu_labels = np.sum(self.labels_grid, axis = 1) # sum 
by row\n \n Index = 0 # RI\n ExpectedIndex = 0 # E[RI]\n MaxIndex = 0 # MaxRI\n \n # calculate RI\n for i in range(self.n_clusters):\n for j in range(self.n_classes):\n a = self.labels_grid[i][j]\n Index += a * (a - 1)/2\n \n # calculate E[RI] and MaxRI\n sum_a = sum([x * (x - 1) / 2 for x in sum_labels])\n sum_b = sum([x * (x - 1) / 2 for x in sum_clu_labels])\n ExpectedIndex = 2 * sum_a * sum_b / (self.n_data * (self.n_data - 1))\n MaxIndex = (sum_a + sum_b) / 2\n\n ARI = (Index - ExpectedIndex) / (MaxIndex - ExpectedIndex)\n return ARI\n\n" } ]
4
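The eval.py file in the cFireworks/kmeans record above derives ARI from the cluster-vs-class fusion matrix as ARI = (Index - ExpectedIndex) / (MaxIndex - ExpectedIndex). A minimal standalone sketch of that same computation for cross-checking; the helper name and the 2x2 toy table are illustrative, not from the repo:

```python
import numpy as np

def ari_from_grid(grid, n):
    # grid[i][j]: points placed in cluster i whose true class is j; n: total points.
    comb2 = lambda x: x * (x - 1) / 2.0
    index = sum(comb2(a) for a in grid.flatten())    # sum of C(n_ij, 2), as in adjust_rand_index
    sum_a = sum(comb2(x) for x in grid.sum(axis=0))  # column (true-class) marginals
    sum_b = sum(comb2(x) for x in grid.sum(axis=1))  # row (cluster) marginals
    expected = sum_a * sum_b / comb2(n)              # E[Index]
    max_index = (sum_a + sum_b) / 2.0                # MaxIndex
    return (index - expected) / (max_index - expected)

# Toy table: 10 points, two clusters that mostly line up with two classes.
grid = np.array([[4, 1],
                 [1, 4]])
print(ari_from_grid(grid, n=10))  # 0.28: better than chance, short of a perfect 1.0
```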
hungntit/scripts
https://github.com/hungntit/scripts
30c117bb48cf1665ff13b1b899a4b97d5903af23
44810c5b86a172a59f0521263f862822abbdf467
334b81cf631c69c50d33b72731ce4de3ed459e4b
refs/heads/master
2021-12-14T21:07:15.256115
2021-12-07T10:24:58
2021-12-07T10:24:58
90,940,667
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 33.61538314819336, "blob_id": "ca1abe3c02c2f79fc562f724dc009c383a8938e4", "content_id": "ef323024807cd057bf01248ef4084aac91012f25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 450, "license_type": "no_license", "max_line_length": 116, "num_lines": 13, "path": "/tunel_redis.sh", "repo_name": "hungntit/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\nDB_ENDPOINT=$1\nEC2_IP=$2\nif [ -z ${DB_ENDPOINT} ];then\n DB_ENDPOINT=`aws elasticache describe-cache-clusters --show-cache-node-info|grep Address| tail -n 1|cut -d'\"' -f4`\nfi\nif [ -z ${EC2_IP} ];then\n EC2_IP=`aws ec2 describe-instances|grep PublicIp|tail -n 1|cut -d'\"' -f4`\nfi\necho \"ssh -N -L 6379:$DB_ENDPOINT:6379 ec2-user@${EC2_IP}\"\necho \"Runing ...\"\necho \"redis-cli -h localhost\"\nssh -N -L 6379:$DB_ENDPOINT:6379 ec2-user@${EC2_IP}\n" }, { "alpha_fraction": 0.747474730014801, "alphanum_fraction": 0.7575757503509521, "avg_line_length": 98, "blob_id": "36b39d31c291cd0d1fe79409b71c2b4363ba32ce", "content_id": "dfee1ed1501ec758a2928b57bd656e70cf47f8ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 198, "license_type": "no_license", "max_line_length": 180, "num_lines": 2, "path": "/delete_aws_loggroup.sh", "repo_name": "hungntit/scripts", "src_encoding": "UTF-8", "text": "LOGGROUP_NAME=$1\nfor logName in `aws logs describe-log-groups |grep $LOGGROUP_NAME|grep logGroupName|cut -d'\"' -f4`; do echo delete $logName;aws logs delete-log-group --log-group-name $logName;done\n" }, { "alpha_fraction": 0.5913242101669312, "alphanum_fraction": 0.6598173379898071, "avg_line_length": 32.69230651855469, "blob_id": "c1d26dc18cb6891fa435ae7f6c4ab2198e0dccd8", "content_id": "2d2e0f2ba0f292cde1d719253d162644ac47e8eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 438, "license_type": "no_license", "max_line_length": 83, "num_lines": 13, "path": "/tunel_psql.sh", "repo_name": "hungntit/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\nDB_ENDPOINT=$1\nEC2_IP=$2\nif [ -z ${DB_ENDPOINT} ];then\n DB_ENDPOINT=`aws rds describe-db-instances|grep Address| tail -n 1|cut -d'\"' -f4`\nfi\nif [ -z ${EC2_IP} ];then\n EC2_IP=`aws ec2 describe-instances|grep PublicIp|tail -n 1|cut -d'\"' -f4`\nfi\necho \"ssh -N -L 5432:$DB_ENDPOINT:5432 ec2-user@${EC2_IP}\"\necho \"Runing ...\"\necho \"psql -h localhost -U postgres -W messagepay\"\nssh -N -L 5432:$DB_ENDPOINT:5432 ec2-user@${EC2_IP}\n" }, { "alpha_fraction": 0.5656565427780151, "alphanum_fraction": 0.6212121248245239, "avg_line_length": 23.75, "blob_id": "7efe00817eb2e14ae7b4fff243bb7dd3034921cf", "content_id": "fd1d9145fff3b6b2f654eb7863fe5035654b7647", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 198, "license_type": "no_license", "max_line_length": 75, "num_lines": 8, "path": "/ssh_ec2.sh", "repo_name": "hungntit/scripts", "src_encoding": "UTF-8", "text": "#!/bin/bash\nEC2_IP=$1\nif [ -z ${EC2_IP} ];then\n EC2_IP=`aws ec2 describe-instances|grep PublicIp|tail -n 1|cut -d'\"' -f4`\nfi\necho \"ssh ec2-user@${EC2_IP}\"\necho \"Runing ...\"\nssh ec2-user@${EC2_IP}\n" }, { "alpha_fraction": 0.6499999761581421, "alphanum_fraction": 0.6600000262260437, "avg_line_length": 32.33333206176758, "blob_id": "4c232d59a3b7bc80b23f07b12306fd5d79c2948c", "content_id": 
"520a39e5f0713ea8ed5215824fa0c3e691cb7c2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "no_license", "max_line_length": 77, "num_lines": 6, "path": "/list_export.py", "repo_name": "hungntit/scripts", "src_encoding": "UTF-8", "text": "import boto3\n\nclient = boto3.client('cloudformation')\nresponse = client.list_exports()\nfor export in response['Exports']:\n print(\"{}='{}'\".format(export['Name'].replace('-','_'), export['Value']))\n" } ]
5
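The tunnel scripts above (tunel_psql.sh, tunel_redis.sh, ssh_ec2.sh) discover endpoints by piping `aws` CLI output through grep/cut, which is fragile if the CLI's output layout shifts. Since the repo already uses boto3 in list_export.py, a hedged Python equivalent of the same lookups might look like this (credentials and region are assumed to come from the default chain; the function names are mine):

```python
import boto3

def last_rds_endpoint():
    # Mirrors: aws rds describe-db-instances | grep Address | tail -n 1 | cut -d'"' -f4
    dbs = boto3.client('rds').describe_db_instances()['DBInstances']
    return dbs[-1]['Endpoint']['Address'] if dbs else None

def any_ec2_public_ip():
    # Mirrors: aws ec2 describe-instances | grep PublicIp | tail -n 1 | cut -d'"' -f4
    ec2 = boto3.client('ec2')
    for page in ec2.get_paginator('describe_instances').paginate():
        for reservation in page['Reservations']:
            for instance in reservation['Instances']:
                if 'PublicIpAddress' in instance:
                    return instance['PublicIpAddress']
    return None

if __name__ == '__main__':
    print("ssh -N -L 5432:%s:5432 ec2-user@%s" % (last_rds_endpoint(), any_ec2_public_ip()))
```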
Amperture/nexsmart-sensebar-server
https://github.com/Amperture/nexsmart-sensebar-server
e019466e57628c1e8e9398d1c99733ca77acc068
143a6b280d74fe16ccc770162cfa2bfe3b25355b
41236eeafdb5b621170d806aae70ec0771a078e6
refs/heads/master
2020-04-25T15:09:16.434413
2014-04-23T20:29:54
2014-04-23T20:29:54
42,281,444
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7633135914802551, "alphanum_fraction": 0.7751479148864746, "avg_line_length": 17.66666603088379, "blob_id": "2ba71c4660326221d672ed8a10b2ce84afbe0e33", "content_id": "dba36ffb935f4e43895a8c78534891ecbc79f33f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 169, "license_type": "no_license", "max_line_length": 40, "num_lines": 9, "path": "/display-script.sh", "repo_name": "Amperture/nexsmart-sensebar-server", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n/root/lightdisplay/display-time\n\n/root/lightdisplay/display-outside-temp \n\nhcitool lescan --duplicates &\n/root/lightdisplay/display-inside-temp &\nsleep 10\n\n" }, { "alpha_fraction": 0.4982244372367859, "alphanum_fraction": 0.5344460010528564, "avg_line_length": 29.60869598388672, "blob_id": "2163dca0e5192800e90677460b11309b8519d2a8", "content_id": "eaad7582c6302315961ee1e6d1fa90890ddf9887", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2816, "license_type": "no_license", "max_line_length": 79, "num_lines": 92, "path": "/clock-temp", "repo_name": "Amperture/nexsmart-sensebar-server", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport Adafruit_BBIO.UART as uart\nimport serial\nfrom time import sleep\nfrom time import strftime as time\nimport subprocess\n\nimport json\nimport pycurl\nfrom StringIO import StringIO\n\nuart.setup(\"UART1\")\nser=serial.Serial(port = \"/dev/ttyO1\", baudrate=9600)\nproc = subprocess.Popen(['hcidump --raw '], stdout=subprocess.PIPE, shell=True)\n\nser.close()\nser.open()\n\nTIME_MINUTES_TENS = [ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nTIME_MINUTES_ONES = [ ')', '!', '@', '#', '$', '%', '^', '&', '*', '(']\nTIME_HOURS_TENS = [ 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't']\nTIME_HOURS_ONES = [ 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T']\nTEMP_IN_TENS = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']\nTEMP_IN_ONES = [ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']\nTEMP_OUT_TENS = [ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nTEMP_OUT_ONES = [ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n\ntemperature = 0\n#temp_Write = \"1!\"\n#ser.write(temp_Write)\n\nbuffer = StringIO()\nc = pycurl.Curl()\n\nwhile True:\n time_Minutes = TIME_MINUTES_ONES[ int(time(\"%M\")) %10] + \\\n TIME_MINUTES_TENS[int(time(\"%M\")) / 10]\n time_Hours = TIME_HOURS_ONES[ int(time(\"%H\")) %10] + \\\n TIME_HOURS_TENS[int(time(\"%H\")) / 10]\n c.setopt(c.URL, \"http://45.55.157.126/api/current_conditions\")\n c.setopt(c.WRITEFUNCTION, buffer.write)\n c.perform()\n\n ''' CODE TO USE WITH PUCK, DO NOT RUN IF PUCK IS NOT ON\n\n for x in range(0, 10):\n puck_output = proc.stdout.readline()\n if \"84 29\" in puck_output and \">\" not in puck_output:\n temperature = ((int(puck_output.split()[8], 16) << 8) \\\n + int(puck_output.split()[7], 16))/10.0\n\n print temperature\n temperature = (temperature*18 +320)/10\n print temperature\n if temperature - (int(temperature)) >= 0.5:\n temperature = int(temperature) + 1\n else: \n temperature = int(temperature)\n\n #print temperature\n temp_Write = TIME_MINUTES_TENS[temperature / 10] \\\n + TIME_MINUTES_ONES[(temperature) % 10]\n '''\n body = buffer.getvalue()\n buffer.truncate(0)\n bodyjson = json.loads(body)[\"current_observation\"][\"temp_f\"]\n temp_Outside = TIME_MINUTES_ONES[(int(bodyjson % 10))] + \\\n TIME_MINUTES_TENS[(int(bodyjson/10))]\n\n\n #print time(\"%M %s\")\n #print time_Minutes\n if 
ser.isOpen():\n        #print \"Serial Open!\"\n        \n        #print time_Minutes\n        #ser.write(time_Minutes)\n        sleep(0.1)\n\n        #print time_Hours\n        ser.write(time_Hours)\n        sleep(0.1)\n\n        #print temp_Write\n        #print temperature\n        #ser.write(temp_Write)\n        sleep(0.1)\n\n        #print temp_Outside\n        ser.write(temp_Outside)\n        sleep(0.1)\n" }, { "alpha_fraction": 0.5170278549194336, "alphanum_fraction": 0.5529412031173706, "avg_line_length": 29.471698760986328, "blob_id": "187bc6c8dd35b39bebfc607d204a4d7ac4bef323", "content_id": "aaf79e8972f4ab1e279a5ea57a3cd4d4485971c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1615, "license_type": "no_license", "max_line_length": 73, "num_lines": 53, "path": "/display-outside-temp", "repo_name": "Amperture/nexsmart-sensebar-server", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport Adafruit_BBIO.UART as uart\nimport serial\nimport json\nimport pycurl\nfrom StringIO import StringIO\n\nTIME_MINUTES_TENS = [ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nTIME_MINUTES_ONES = [ ')', '!', '@', '#', '$', '%', '^', '&', '*', '(']\nTIME_HOURS_TENS = [ 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't']\nTIME_HOURS_ONES = [ 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T']\nTEMP_IN_TENS = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']\nTEMP_IN_ONES = [ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']\nTEMP_OUT_TENS = [ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nTEMP_OUT_ONES = [ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n\nuart.setup(\"UART1\")\nser=serial.Serial(port = \"/dev/ttyO1\", baudrate=9600)\n\nser.close()\nser.open()\n\n\nc = pycurl.Curl()\nbuffer = StringIO()\nc.setopt(c.URL, \"http://45.55.157.126/api/current_conditions\")\nc.setopt(c.WRITEFUNCTION, buffer.write)\nc.perform()\n\n#print \"Internet data CURL'd\"\n\nbody = buffer.getvalue()\nbuffer.truncate(0)\nbodyjson = json.loads(body)[\"current_observation\"][\"temp_f\"]\n#print \"It is currently \" + str(bodyjson) + \" degrees Farenheit outside!\"\n\n# For the breadboard test\n\ntemp_Outside = TIME_MINUTES_ONES[ int(bodyjson % 10) ] + \\\n        TIME_MINUTES_TENS[ int(bodyjson/10) % 10]\n#print \"Temperature Commands Calculated!\"\n\n\n\n# For final production\n#temp_Outside = TEMP_OUT_ONES[ int(bodyjson % 10) ] + \\\n        #TEMP_OUT_TENS[ int(bodyjson/10) % 10]\n\nif ser.isOpen():\n    ser.write(temp_Outside)\n    #print \"Temperature data sent to serial port!\"\n    ser.close()\n    #print \"Serial Port Closed\"\n" }, { "alpha_fraction": 0.4983202815055847, "alphanum_fraction": 0.5251959562301636, "avg_line_length": 26.90625, "blob_id": "c92cd29870179be399a49b45ab62e3b9b68ac379", "content_id": "753a30b772a9c7bc19d1179ca3b92b715a29ab44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 893, "license_type": "no_license", "max_line_length": 71, "num_lines": 32, "path": "/display-time", "repo_name": "Amperture/nexsmart-sensebar-server", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport Adafruit_BBIO.UART as uart\nimport serial\nfrom time import strftime as time\nimport json \nimport pycurl\n\nuart.setup(\"UART1\")\nser=serial.Serial(port = \"/dev/ttyO1\", baudrate=9600)\n\nser.close()\nser.open()\n\nTIME_MINUTES_TENS = [ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nTIME_MINUTES_ONES = [ ')', '!', '@', '#', '$', '%', '^', '&', '*', '(']\nTIME_HOURS_TENS = [ 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't']\nTIME_HOURS_ONES = [ 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 
'T']\n\ntime_Minutes = TIME_MINUTES_ONES[ int(time(\"%M\")) %10] + \\\n TIME_MINUTES_TENS[int(time(\"%M\")) / 10]\ntime_Hours = TIME_HOURS_ONES[ int(time(\"%H\")) %10] + \\\n TIME_HOURS_TENS[int(time(\"%H\")) / 10]\n\nif ser.isOpen():\n '''\n print time(\"%M\")\n print time(\"%H\")\n print time_Minutes\n print time_Hours\n '''\n ser.write(time_Minutes + time_Hours)\n ser.close()\n" }, { "alpha_fraction": 0.5471100807189941, "alphanum_fraction": 0.5748218297958374, "avg_line_length": 27.066667556762695, "blob_id": "48bc6eb3bc0175da546b6ef054c04e06a4d81a8a", "content_id": "8e38e1fd329a8a1952ff4980628fbc617171dc51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1263, "license_type": "no_license", "max_line_length": 80, "num_lines": 45, "path": "/lightbar", "repo_name": "Amperture/nexsmart-sensebar-server", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport Adafruit_BBIO.UART as uart\nimport serial\nfrom time import sleep\nfrom time import strftime as time\nimport subprocess\n\nuart.setup(\"UART2\")\nser=serial.Serial(port = \"/dev/ttyO2\", baudrate=9600)\n\nser.close()\nser.open()\n\nWATER_STORED = [ 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's']\nWATER_USED = [ 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S']\nENERGY_STORED = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']\nENERGY_USED = [ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']\n\ni = 0\nsendEnergyStored = 0\nsendEnergyUsed = 0\n\n#while True:\nfor x in range(0,10):\n i = i+1\n if (i % 137 == 0):\n sendEnergyStored = sendEnergyStored + 1\n if sendEnergyStored > 8:\n sendEnergyStored = 0\n\n if (i % 571 == 0):\n sendEnergyUsed = sendEnergyUsed + 1\n if sendEnergyUsed > 8:\n sendEnergyUsed = 0\n\n if (i == 1000000):\n i = 0\n\n print ENERGY_STORED[sendEnergyStored] + ENERGY_USED[sendEnergyUsed]\n print WATER_STORED[sendEnergyStored] + WATER_USED[sendEnergyUsed]\n if(ser.isOpen()):\n ser.write(ENERGY_STORED[sendEnergyStored] + ENERGY_USED[sendEnergyUsed])\n ser.write(WATER_STORED[sendEnergyStored] + WATER_USED[sendEnergyUsed])\n ser.close()\n" }, { "alpha_fraction": 0.5257256031036377, "alphanum_fraction": 0.5600264072418213, "avg_line_length": 27.60377311706543, "blob_id": "e89f317fdd69de3c0cb29a0d7033fdffb06bc6d8", "content_id": "9d67d54ee35f57a13cb25152d493a8607434554b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1516, "license_type": "no_license", "max_line_length": 79, "num_lines": 53, "path": "/display-inside-temp", "repo_name": "Amperture/nexsmart-sensebar-server", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport Adafruit_BBIO.UART as uart\nimport serial\nfrom time import sleep\nfrom time import strftime as time\nimport subprocess\n\nimport json\nimport pycurl\nfrom StringIO import StringIO\n\nuart.setup(\"UART1\")\nser=serial.Serial(port = \"/dev/ttyO1\", baudrate=9600)\nproc = subprocess.Popen(['hcidump --raw '], stdout=subprocess.PIPE, shell=True)\n\nser.close()\nser.open()\n\nTIME_MINUTES_TENS = [ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nTIME_MINUTES_ONES = [ ')', '!', '@', '#', '$', '%', '^', '&', '*', '(']\nTEMP_IN_TENS = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']\nTEMP_IN_ONES = [ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']\n\ntemperature = 0\n#ser.write(temp_Write)\n\nfor x in range(0, 10):\n puck_output = proc.stdout.readline()\n if \"84 29\" in puck_output and \">\" not in puck_output:\n temperature = ((int(puck_output.split()[8], 16) << 8) \\\n + 
int(puck_output.split()[7], 16))/10.0\n\n temperature = (temperature*18 +320)/10\n\n if temperature - (int(temperature)) >= 0.5:\n temperature = int(temperature) + 1\n else: \n temperature = int(temperature)\n\n # Code for breadboard\n temp_Write = TIME_MINUTES_TENS[temperature / 10] \\\n + TIME_MINUTES_ONES[(temperature) % 10]\n\n # Code for final production.\n #temp_Write = TEMP_IN_TENS[temperature / 10] \\\n #+ TEMP_IN_ONES[(temperature) % 10]\n\nif ser.isOpen():\n ser.write(temp_Write)\n ser.close()\n\nproc.kill()\n" } ]
6
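Every display script in the Amperture record above encodes a two-digit value as a pair of single-byte commands, one lookup table per digit position, before writing it to UART. The scheme isolated into a hedged helper (the tables are copied from the scripts, the function name is mine; clock-temp happens to send the ones digit first):

```python
TIME_MINUTES_TENS = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
TIME_MINUTES_ONES = [')', '!', '@', '#', '$', '%', '^', '&', '*', '(']

def encode_two_digits(value, ones_table, tens_table, ones_first=True):
    # Clamp to the 0-99 range the bar can show, then emit one command
    # character per digit from the given lookup tables.
    value = max(0, min(99, int(value)))
    ones = ones_table[value % 10]
    tens = tens_table[value // 10]
    return ones + tens if ones_first else tens + ones

# 37 minutes -> '&' (ones digit 7) followed by '3' (tens digit 3), as clock-temp sends it.
assert encode_two_digits(37, TIME_MINUTES_ONES, TIME_MINUTES_TENS) == '&3'
```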
gunnkh/project_lede_12
https://github.com/gunnkh/project_lede_12
d6873f11a829670a7cabe89b57a6b0c17e230dab
2f7aa42debf7820b34c1e46738f0ea6f43089309
07eddb0612af63e2f7cea664e28d4ea3310fd7f3
refs/heads/master
2021-01-10T21:42:58.141036
2015-08-25T18:27:42
2015-08-25T18:27:42
41,380,423
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6284263730049133, "alphanum_fraction": 0.6365482211112976, "avg_line_length": 19.93617057800293, "blob_id": "5953503e8becf6a2c61067bf1517e0804cbcbc02", "content_id": "86ea81820c9370b2cd9b7f2b0e6cdfc45c44a2b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 988, "license_type": "no_license", "max_line_length": 103, "num_lines": 47, "path": "/h_both.py", "repo_name": "gunnkh/project_lede_12", "src_encoding": "UTF-8", "text": "\n# import csv\n\n# # donors = {\n# # 'donor': 'Sole Invest', \n# # 'amount': '200000'\n# # }\n\n\n# donors = list(csv.DictReader(open(\"/Users/gunnkh/Desktop/h_donor.csv\", 'rU'), dialect=csv.excel_tab))\n\nimport pandas as pd\ndonors = pd.read_csv( \"/Users/gunnkh/Desktop/lede/h_donors_all.csv\" )\n\n#print donors\n#[d for d in donors]\n#print d\n\n# def h_donor(donor):\n# donor = donors['donor']\n# return donors \n\n\n# def h_amount(donor):\n# amount = donors['amount']\n# return donors['amount']\n\n#print donors\n\n#for s in ( donors['donor']+\" ga \"+donors['amount'].astype( str ) ).values:\n# print s,\"kroner til Høyres valgkamp.\"\n\n\ndef h_sentence(d):\n donor = d['donor']\n amount = d['amount']\n return \"{0} ga {1} kroner til Høyres valgkamp.\".format(donor, amount)\n\n#def h_sentence(d):\n# return \"{donor} ga {amount} kroner til Høyres valgkamp\".format(**d)\n\n# #print h_sentence(donors)\n\n# for d in donors:\n# print h_sentence(d)\n\nfor index, d in donors.iterrows():\n print h_sentence(d)\n" }, { "alpha_fraction": 0.6645746231079102, "alphanum_fraction": 0.6875871419906616, "avg_line_length": 25.537036895751953, "blob_id": "11369de70aa0767e18e9b9638a93f185d9957794", "content_id": "5f420adc16c5762552c3e52c08c2c5021ef14a7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1434, "license_type": "no_license", "max_line_length": 107, "num_lines": 54, "path": "/bot_test.py", "repo_name": "gunnkh/project_lede_12", "src_encoding": "UTF-8", "text": "import csv\nimport codecs\nimport os\n\n# Read in the old content\n\nif os.path.isfile(\"/Users/gunnkh/Desktop/lede/test_both_file.txt\"):\n\twith codecs.open(\"/Users/gunnkh/Desktop/lede/test_both_file.txt\", 'rU', encoding=\"utf-8\") as inputfile:\n\t\told_lines = [s.strip() for s in inputfile.readlines()]\nelse:\n\told_lines = []\n\n# scraper - Do whatever to create new_lines\nimport urllib2\nfrom bs4 import BeautifulSoup\nurl = 'http://hoyre.no/Presse/%C3%98konomiske-bidrag/%C3%98konomiske-bidrag-2015'\nconnection = urllib2.urlopen(url)\npage = BeautifulSoup(connection.read())\ndiv_tag = page.find('div', attrs={'class':'articlebody small-12 medium-12 large-12 large-centered column'})\n\np_tag = div_tag.find_all('p')\n\ndonor15 = []\nfor p in p_tag:\n donors = p.getText()\n\n donor15.append(donors)\n# print donor15\n\n\n# new line is list of sentencenses\n#new_lines = ['mm','ss']\ndonor15\n\n# Find all of the changes\nchanges = [line for line in donor15 if line not in old_lines]\n\nprint \"Changes are\"\nprint changes\n\n# Save new lines to the output file\nwith codecs.open(\"/Users/gunnkh/Desktop/lede/test_both_file.txt\", \"wb\", encoding=\"utf-8\") as outfile:\n for line in donor15:\n # For some reason you need to add a newline so\n # it doesn't all run together\n outfile.write(line + '\\n')\n\n\n# import csv\n\n# with open(\"input.txt\") as file: \n# reader = csv.reader(file, delimiter=' ')\n# for row in reader:\n# # print row\n\n" }, { "alpha_fraction": 0.6297029852867126, 
"alphanum_fraction": 0.6653465628623962, "avg_line_length": 17.703702926635742, "blob_id": "969288c8738caabc6ac0c9353b7865a2cfbd0429", "content_id": "211447ad571851ced2dca83d7d92cd18c128ba21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 107, "num_lines": 27, "path": "/setup_h_both.py", "repo_name": "gunnkh/project_lede_12", "src_encoding": "UTF-8", "text": "$ crontab -e\n\n5****\n\ncron.txt file\n\npython ~/\n\n\n\nurl = 'http://hoyre.no/Presse/%C3%98konomiske-bidrag/%C3%98konomiske-bidrag-2015'\nconnection = urllib2.urlopen(url)\npage = BeautifulSoup(connection.read())\n\ndiv_tag = page.find('div', attrs={'class':'articlebody small-12 medium-12 large-12 large-centered column'})\n\n#donors = []\n#for p in p_tag:\np_tag = div_tag.find_all('p')\n\n# print p.getText()\n# donors.append( p.getText() )\n#print p_tag\n\nfor p in p_tag:\n donors = p.getText()\n print donors\n" }, { "alpha_fraction": 0.7114846110343933, "alphanum_fraction": 0.7114846110343933, "avg_line_length": 28.66666603088379, "blob_id": "3c903a8dbaded290f094413b6fac95d8346bc1eb", "content_id": "32a6c36b5fffd32875ba050582f36b8e0f1abe32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 714, "license_type": "no_license", "max_line_length": 68, "num_lines": 24, "path": "/email.py", "repo_name": "gunnkh/project_lede_12", "src_encoding": "UTF-8", "text": "# Import smtplib for the actual sending function\nimport smtplib\n\n# Import the email modules we'll need\nfrom email.mime import Text\n\n# Open a plain text file for reading. For this example, assume that\n# the text file contains only ASCII characters.\nfp = open('/Users/gunnkh/Desktop/lede/test_both_file.txt', 'rb')\n# Create a text/plain message\nmsg = Text.MIMEText(fp.read())\nfp.close()\n\n# me == the sender's email address\n# you == the recipient's email address\nmsg['Test'] = 'The contents of %s' % textfile\nmsg['[email protected]'] = me\nmsg['[email protected]'] = you\n\n# Send the message via our own SMTP server, but don't include the\n# envelope header.\ns = smtplib.SMTP('localhost')\ns.sendmail(me, [you], msg.as_string())\ns.quit()\n\n\n" } ]
4
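bot_test.py in the record above is a scrape-and-diff bot: load the previous run's lines, fetch the current list, report anything new, then overwrite the state file. The same pattern factored into a reusable helper (the function name and state-file handling are mine; a sketch rather than the repo's exact flow):

```python
import codecs
import os

def diff_against_state(new_lines, state_path):
    # Return the lines not seen on the previous run, then persist the new state.
    if os.path.isfile(state_path):
        with codecs.open(state_path, 'r', encoding='utf-8') as f:
            old = set(s.strip() for s in f)
    else:
        old = set()
    changes = [line for line in new_lines if line.strip() not in old]
    with codecs.open(state_path, 'w', encoding='utf-8') as f:
        for line in new_lines:
            f.write(line + '\n')  # one scraped sentence per line
    return changes
```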
leemun1/blog-dj
https://github.com/leemun1/blog-dj
afa4d217b23747caa03c7fdfb00b488921da2a7c
6760d67f719c46d08880722f14c5e144b4ad28f9
befd3fc681236b5adf3c2147ae62ed4810ee3edd
refs/heads/master
2021-01-16T20:24:33.195532
2017-08-13T20:47:51
2017-08-13T20:47:51
100,203,793
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.646835446357727, "alphanum_fraction": 0.649367094039917, "avg_line_length": 30.197368621826172, "blob_id": "0d37a9bf30eb7f1dfbbcc31afb6623ec48aaf1cf", "content_id": "def853971dfabd22534979b8489c4391c2af5ef1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2370, "license_type": "no_license", "max_line_length": 75, "num_lines": 76, "path": "/blogs/views.py", "repo_name": "leemun1/blog-dj", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import Post\nfrom .forms import PostForm\n\ndef index(request):\n \"\"\"The home page for Blog.\"\"\"\n return render(request, 'blogs/index.html')\n\n@login_required\ndef posts(request):\n \"\"\"Show all blog posts.\"\"\"\n posts = Post.objects.filter(owner=request.user).order_by('-date_added')\n context = {'posts': posts}\n return render(request, 'blogs/posts.html', context)\n\n@login_required\ndef post(request, post_id):\n \"\"\"Show a single post.\"\"\"\n post = Post.objects.get(id=post_id)\n check_post_user(post, request)\n context = {'post': post}\n return render(request, 'blogs/post.html', context)\n\n@login_required\ndef new_post(request):\n \"\"\"Add a new post.\"\"\"\n if request.method != 'POST':\n # No data submitted; create a blank form.\n form = PostForm()\n else:\n # POST data submitted; process data.\n form = PostForm(request.POST)\n if form.is_valid():\n new_post = form.save(commit=False)\n new_post.owner = request.user\n new_post.save()\n return HttpResponseRedirect(reverse('blogs:posts'))\n\n context = {'form': form}\n return render(request, 'blogs/new_post.html', context)\n\n@login_required\ndef edit_post(request, post_id):\n \"\"\"Edit an existing post.\"\"\"\n post = Post.objects.get(id=post_id)\n check_post_user(post, request)\n\n if request.method != 'POST':\n # Initial request; pre-fill form with the current entry.\n form = PostForm(instance=post)\n else:\n # POST data submitted; process data.\n form = PostForm(instance=post, data=request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(\n reverse('blogs:post', args=[post.id]))\n\n context = {'post': post, 'form': form}\n return render(request, 'blogs/edit_post.html', context)\n\n@login_required\ndef delete_post(request, post_id):\n \"\"\"Delete an existing post.\"\"\"\n post = Post.objects.get(id=post_id)\n check_post_user(post, request)\n post.delete()\n return HttpResponseRedirect(reverse('blogs:posts'))\n\ndef check_post_user(post, request):\n if post.owner != request.user:\n raise Http404" }, { "alpha_fraction": 0.5828402638435364, "alphanum_fraction": 0.5828402638435364, "avg_line_length": 24.074073791503906, "blob_id": "28cb8b24fbfa71637c68aff45e4729a3d7387336", "content_id": "2b6d3207be9c09f5c8fbb1b55dc77c1c13d22d25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 676, "license_type": "no_license", "max_line_length": 63, "num_lines": 27, "path": "/blogs/urls.py", "repo_name": "leemun1/blog-dj", "src_encoding": "UTF-8", "text": "\"\"\"Defines URL patterns for blogs\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n # Home page\n url(r'^$', views.index, name='index'),\n\n # Show all posts.\n url(r'^posts/$', views.posts, name='posts'),\n\n # Detail page for a single post\n url(r'^posts/(?P<post_id>\\d+)/$', views.post, name='post'),\n\n # Page for adding a new post\n url(r'^new_post/$', views.new_post, name='new_post'),\n\n # Page for editing a post\n url(r'^edit_post/(?P<post_id>\\d+)/$', views.edit_post, \n name='edit_post'),\n \n # Page for deleting a post\n url(r'^delete/(?P<post_id>\\d+)/$', views.delete_post, \n name='delete_post'),\n]" }, { "alpha_fraction": 0.5168269276618958, "alphanum_fraction": 0.5961538553237915, "avg_line_length": 20.894737243652344, "blob_id": "3c287ca47ac648b55d54b43b3f3a2ac0e1ed5b1e", "content_id": "9ed43981784b9cd5733ea72a7e695e5b0002ec1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 416, "license_type": "no_license", "max_line_length": 53, "num_lines": 19, "path": "/blogs/migrations/0003_auto_20170812_0332.py", "repo_name": "leemun1/blog-dj", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.4 on 2017-08-12 07:32\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blogs', '0002_auto_20170812_0331'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='post',\n options={'verbose_name_plural': 'posts'},\n ),\n ]\n" } ]
3
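views.py in the blog-dj record fetches with `Post.objects.get(id=post_id)` (which turns an unknown id into a 500 via `Post.DoesNotExist`) and then checks ownership separately in `check_post_user`. A common Django idiom folds both into one guarded lookup; a hedged rewrite of the `post` view under that idiom:

```python
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, render

from .models import Post

@login_required
def post(request, post_id):
    """Show a single post; missing and foreign posts both return 404."""
    # Filtering on owner=request.user makes someone else's post
    # indistinguishable from a nonexistent one.
    post = get_object_or_404(Post, id=post_id, owner=request.user)
    return render(request, 'blogs/post.html', {'post': post})
```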
JoeMoggridge/text_reordering
https://github.com/JoeMoggridge/text_reordering
3803153b127d612cbe197d66163c38b456fdd922
611b8192da1801cbac08dce67c69570b1a554766
06eee500473ec0c120a303db20e80c6b5db92452
refs/heads/master
2020-05-26T20:27:40.961159
2017-09-10T21:06:33
2017-09-10T21:06:33
82,502,443
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8103448152542114, "alphanum_fraction": 0.8103448152542114, "avg_line_length": 28, "blob_id": "44474846f527c48ab1f232181d73d118a1d3afa3", "content_id": "fb94abd74e2aedca7e144efd192c4a7b95767dd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 58, "license_type": "no_license", "max_line_length": 39, "num_lines": 2, "path": "/README.md", "repo_name": "JoeMoggridge/text_reordering", "src_encoding": "UTF-8", "text": "# text_reordering\npython project to re-order text strings\n" }, { "alpha_fraction": 0.5166858434677124, "alphanum_fraction": 0.5306591987609863, "avg_line_length": 37.025001525878906, "blob_id": "1caa38cef0a589c858b6a0e831fb4b9668ad52a8", "content_id": "9a6ea275c5d28ab3c400c17c7090bf78a6eaf170", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6083, "license_type": "no_license", "max_line_length": 149, "num_lines": 160, "path": "/main.py", "repo_name": "JoeMoggridge/text_reordering", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport re\n\n\ndef process_line (inputline) :\n\n#(((?#optionalerrorcode)\\\\+(.+))*)(.*)\n#'Logging\\\\.Failure\\\\(((?#errortext).+)(.*)\\\\+((?#txt).+)\\\\+((?#value).+)\\\\+((?#txt).+)\\\\+((?#value).+)\\\\);'\n MyRegex1 = re.compile('(.*?)Logging\\\\.Failure\\\\((.{5,150}?)(\\\\+(.{5,50}))?\\\\+(.{5,50}?)\\\\+(.{5,50}?)\\\\+(.{5,50}?)\\\\+(.{5,50}?)\\\\+(.{3,50}?)\\\\);')\n searchforstart = re.compile('(.*?)Logging\\\\.Failure\\\\((?P<msgError>.{5,150}?)\\\\+((?P<valError>.{5,50}?)|(.+?))(?=\\\\+)')\n searchforexpected = re.compile('\\\\+(.{1,10})((E|e)xpected (((V|v)alue)|(.{2,10})))(.{1,10})\\\\+(?P<expectedvariable>.{3,50}?)(\\\\+|\\\\))')\n searchforactual = re.compile ('\\\\+(.{1,10})((A|a)ctual (((V|v)alue)|(.{2,10})))(.{1,10})\\\\+(?P<actualvariable>.{3,50}?)(\\\\+|\\\\))')\n\n matchedline = re.search(MyRegex1, inputline)\n if(not matchedline):\n return \"line does not match\"\n\n m_startbit = re.search(searchforstart, matchedline.group())\n\n if m_startbit :\n\n m_expected = re.search(searchforexpected, matchedline.group())\n if m_expected :\n s_expected = m_expected.group('expectedvariable')\n\n m_actual = re.search(searchforactual, matchedline.group())\n if m_actual :\n s_actual = m_actual.group('actualvariable')\n\n #if the error mesage contains a value the we need to add a quote. 
if not, we do not need to add a quote\n        if m_startbit.group('valError') is not None:\n            outputline = m_startbit.group() + \"+ \\\" \\'\"+\" \\\", \" + s_expected + \" , \" + s_actual + \" );\"\n        else:\n            outputline = m_startbit.group() + \" , \" + s_expected + \" , \" + s_actual + \" );\"\n\n        return outputline\n\n    return \"line processing failed\"\n\ndef stringify (inputline):\n    #adds .ToString() to variables, if required\n    MyRegEx2 = re.compile('(?P<Whitespace>.*?)Logging\\\\.Failure\\\\((?P<msgError>.{5,100})?,(?P<valExpected>.{5,30})?,(?P<valActual>.{5,30})?\\\\);')\n\n    MyResult = re.search(MyRegEx2, inputline)\n\n    if MyResult and MyResult.group('valExpected') and MyResult.group('valActual'):\n\n        s_matchedexpected = MyResult.group('valExpected')\n        s_matchedactual = MyResult.group('valActual')\n        s_whitespace = MyResult.group('Whitespace')\n\n        # first remove whitespace\n        s_matchedexpected = s_matchedexpected.strip()\n        s_matchedactual = s_matchedactual.strip()\n\n        #if either of the matched arguments do not start with \"str\", then add the \".ToString()\" method on the end of both\n        if (not re.match('str', s_matchedexpected)) or (not re.match('str', s_matchedactual)):\n            #if either of the matched arguments do not start with 'int' then we really are going to have to add '.ToString()' to the end.\n            if (not re.match('int', s_matchedexpected)) or (not re.match('int', s_matchedactual)):\n                s_matchedexpected = s_matchedexpected+\".ToString()\"\n                s_matchedactual = s_matchedactual+\".ToString()\"\n\n        return s_whitespace+ \"Logging.Failure(\" + MyResult.group('msgError') +\" , \" + s_matchedexpected + \" , \" + s_matchedactual + \");\"\n\n    #else\n    return \"no match on this line\"\n\n# main\n#==============\n\n#firstly, reorder the arguments\n\nwith open(\"input.txt\", \"r\") as InFile:\n    with open(\"TextReordering_tmp.txt\", \"w\") as OutFile:\n\n        print(\"---> Input file successfully opened.\\n\")\n        line_num = 0\n        doesnotmatchflag = False\n        linecounter = 0\n\n        for line in InFile:\n\n            line_num += 1\n\n\n            if line_num > 0 :\n\n                #1) process line\n                result = process_line(line)\n\n                if result == \"line processing failed\" :\n                    # first deal with previous lines which might be duplicates\n                    if linecounter > 0:\n                        print( \"Re-order: ... \" + str(linecounter) + \" lines did not match the pattern\")\n                        linecounter=0\n                    #next, log the current message:\n                    print(\"Re-order: for line \" +str(line_num)+ \", processing failed!\")\n                    OutFile.write(line)\n\n\n                elif result == \"line does not match\" :\n                    OutFile.write(line)\n\n                    if linecounter == 0: #this line is not a duplicate, log it\n                        print(\"Re-order: line \" + str(line_num) + \": does not match\")\n\n                    # count this non-matching line; only the first of a run is logged\n                    linecounter += 1\n\n\n\n                else:\n                    #first deal with previous lines which might be duplicates\n                    if linecounter > 0:\n                        print( \"Re-order: ... 
\" + str(linecounter) + \" lines did not match\")\n linecounter=0\n # next, log the current message:\n print(\"Re-order: line \" + str(line_num) + \" overwriting with: \"+ result)\n OutFile.write(result+\"\\n\")\n\n OutFile.close()\nInFile.close()\n\n# next, add .Tostring(), if required\nwith open(\"TextReordering_tmp.txt\", \"r\") as InFile:\n with open(\"output.txt\", \"w\") as OutFile:\n\n line_num = 0\n\n for stringy_ln in InFile:\n\n line_num += 1\n\n #2) stringify\n result= stringify(stringy_ln)\n\n if result== \"no match on this line\":\n OutFile.write(stringy_ln)\n\n if linecounter == 0: # this line is not a duplicate, log it\n print(\"stringify: line \" + str(line_num) + \": does not match \")\n\n # this is a duplicate, dont log it\n linecounter += 1\n\n else:\n # first deal with previous lines whichmight be duplicates\n if linecounter > 0:\n print(\"stringify: ... \" + str(linecounter) + \" lines did not match\")\n linecounter = 0\n\n # next, log the current message:\n OutFile.write(result+\"\\n\")\n print(\"stringify: line \" + str(line_num) + \" overwriting with: \" + result)\n\n #cleanup\n OutFile.close()\nInFile.close()\nos.remove(\"TextReordering_tmp.txt\")" } ]
2
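Both passes in main.py above work by matching named groups and re-emitting them in a new order. The core move in isolation — the pattern and sample line below are illustrative, far simpler than the repo's real regexes:

```python
import re

# Toy pattern: a message literal plus two '+'-joined variables.
CALL = re.compile(
    r'Logging\.Failure\((?P<msg>"[^"]*")\s*\+\s*(?P<expected>\w+)\s*\+\s*(?P<actual>\w+)\);')

def reorder(line):
    m = CALL.search(line)
    if not m:
        return line  # pass non-matching lines through, as main.py does
    return 'Logging.Failure(%s, %s, %s);' % (
        m.group('msg'), m.group('expected'), m.group('actual'))

print(reorder('Logging.Failure("bad value: " + expectedVal + actualVal);'))
# -> Logging.Failure("bad value: ", expectedVal, actualVal);
```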
xp-vit/flask-babelex
https://github.com/xp-vit/flask-babelex
bc9efc4176dc00b51a3c26f61fa0aa58b6eb38ab
03c9856ff989ff353c8e4ffca1f2fbd2f6fb0b74
836dd9c4f439b5c426b561b6069eb609c4780943
refs/heads/master
2022-06-09T11:33:41.177461
2020-02-07T15:16:12
2020-02-07T15:16:12
260,752,555
0
0
NOASSERTION
2020-05-02T18:43:49
2020-04-24T03:58:13
2020-03-25T19:56:01
null
[ { "alpha_fraction": 0.630630612373352, "alphanum_fraction": 0.7207207083702087, "avg_line_length": 14.857142448425293, "blob_id": "00b6764dd876a1bf99399dda9e786b3f9d499f77", "content_id": "44c894bbde2154d6b440393ce5b6e03f6dc730e3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 111, "license_type": "permissive", "max_line_length": 26, "num_lines": 7, "path": "/tox.ini", "repo_name": "xp-vit/flask-babelex", "src_encoding": "UTF-8", "text": "[tox]\nenvlist = py26, py27, py33\n\n[testenv]\ndeps = pytz>=2013a\nwhitelist_externals = make\ncommands = make test\n" }, { "alpha_fraction": 0.6350148320198059, "alphanum_fraction": 0.6486647129058838, "avg_line_length": 28.05172348022461, "blob_id": "6d07dff83fc2dc95654f15ccffb15faa935357a6", "content_id": "abffdf3c3500cee657f6b8d730d0f84992a2e43d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1685, "license_type": "permissive", "max_line_length": 85, "num_lines": 58, "path": "/setup.py", "repo_name": "xp-vit/flask-babelex", "src_encoding": "UTF-8", "text": "\"\"\"\nFlask-BabelEx\n-------------\n\nAdds i18n/l10n support to Flask applications with the help of the\n`Babel`_ library.\n\nThis is fork of official Flask-Babel extension with following features:\n\n1. It is possible to use multiple language catalogs in one Flask application;\n2. Localization domains: your extension can package localization file(s) and use them\n if necessary;\n3. Does not reload localizations for each request.\n\nLinks\n`````\n\n* `documentation <http://packages.python.org/Flask-BabelEx>`_\n* `development version\n <http://github.com/mrjoes/flask-babelex/zipball/master#egg=Flask-BabelEx-dev>`_\n* `original Flask-Babel extension <https://pypi.python.org/pypi/Flask-Babel>`_.\n\n.. _Babel: http://babel.edgewall.org/\n\n\"\"\"\nfrom setuptools import setup\n\n\nsetup(\n name='Flask-BabelEx',\n version='0.9.4',\n url='http://github.com/mrjoes/flask-babelex',\n license='BSD',\n author='Serge S. Koval',\n author_email='[email protected]',\n description='Adds i18n/l10n support to Flask applications',\n long_description=__doc__,\n packages=['flask_babelex'],\n zip_safe=False,\n platforms='any',\n install_requires=[\n 'Flask',\n 'Babel>=1.0',\n 'speaklater>=1.2',\n 'Jinja2>=2.5'\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ]\n)\n" } ]
2
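The Flask-BabelEx record closes the dump; its setup.py packages the i18n extension described in the long description. A minimal hedged usage sketch (the app and the fixed locale are illustrative; `Babel`, `gettext`, and the `localeselector` hook follow the extension's Flask-Babel-compatible API):

```python
from flask import Flask
from flask_babelex import Babel, gettext

app = Flask(__name__)
babel = Babel(app)  # wires locale and timezone selection into the app

@babel.localeselector
def get_locale():
    # Pick a catalog per request; a fixed 'de' stands in for real negotiation.
    return 'de'

@app.route('/')
def index():
    return gettext(u'Hello world')
```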